comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Yes, thanks. Fixed!
private void updateAllocationMetrics(NodeList nodes) { Map<ClusterKey, List<Node>> byApplication = nodes.stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> new ClusterKey(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byApplication.forEach((clusterKey, allocatedNodes) -> { int activeNodes = 0; int nonActiveNodes = 0; for (var node : allocatedNodes) { if (node.state() == State.active) { activeNodes++; } else { nonActiveNodes++; } } double nonActiveFraction; if (activeNodes == 0) { nonActiveFraction = 1; } else { nonActiveFraction = (double) nonActiveNodes / (double) activeNodes; } Map<String, String> dimensions = new HashMap<>(dimensions(clusterKey.application)); dimensions.put("clusterId", clusterKey.cluster.value()); metric.set("nodes.active", activeNodes, getContext(dimensions)); metric.set("nodes.nonActive", nonActiveNodes, getContext(dimensions)); metric.set("nodes.nonActiveFraction", nonActiveFraction, getContext(dimensions)); }); }
Map<ClusterKey, List<Node>> byApplication = nodes.stream()
private void updateAllocationMetrics(NodeList nodes) { Map<ClusterKey, List<Node>> byCluster = nodes.stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> new ClusterKey(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterKey, allocatedNodes) -> { int activeNodes = 0; int nonActiveNodes = 0; for (var node : allocatedNodes) { if (node.state() == State.active) { activeNodes++; } else { nonActiveNodes++; } } double nonActiveFraction; if (activeNodes == 0) { nonActiveFraction = 1; } else { nonActiveFraction = (double) nonActiveNodes / (double) activeNodes; } Map<String, String> dimensions = new HashMap<>(dimensions(clusterKey.application)); dimensions.put("clusterId", clusterKey.cluster.value()); metric.set("nodes.active", activeNodes, getContext(dimensions)); metric.set("nodes.nonActive", nonActiveNodes, getContext(dimensions)); metric.set("nodes.nonActiveFraction", nonActiveFraction, getContext(dimensions)); }); }
class MetricsReporter extends NodeRepositoryMaintainer { private final Metric metric; private final Orchestrator orchestrator; private final ServiceMonitor serviceMonitor; private final Map<Map<String, String>, Metric.Context> contextMap = new HashMap<>(); private final Supplier<Integer> pendingRedeploymentsSupplier; MetricsReporter(NodeRepository nodeRepository, Metric metric, Orchestrator orchestrator, ServiceMonitor serviceMonitor, Supplier<Integer> pendingRedeploymentsSupplier, Duration interval) { super(nodeRepository, interval, metric); this.metric = metric; this.orchestrator = orchestrator; this.serviceMonitor = serviceMonitor; this.pendingRedeploymentsSupplier = pendingRedeploymentsSupplier; } @Override public boolean maintain() { NodeList nodes = nodeRepository().list(); ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot(); updateZoneMetrics(); updateCacheMetrics(); updateMaintenanceMetrics(); nodes.forEach(node -> updateNodeMetrics(node, serviceModel)); updateNodeCountMetrics(nodes); updateLockMetrics(); updateDockerMetrics(nodes); updateTenantUsageMetrics(nodes); updateAllocationMetrics(nodes); return true; } private void updateZoneMetrics() { metric.set("zone.working", nodeRepository().isWorking() ? 
1 : 0, null); } private void updateCacheMetrics() { CacheStats nodeCacheStats = nodeRepository().database().nodeSerializerCacheStats(); metric.set("cache.nodeObject.hitRate", nodeCacheStats.hitRate(), null); metric.set("cache.nodeObject.evictionCount", nodeCacheStats.evictionCount(), null); metric.set("cache.nodeObject.size", nodeCacheStats.size(), null); CacheStats curatorCacheStats = nodeRepository().database().cacheStats(); metric.set("cache.curator.hitRate", curatorCacheStats.hitRate(), null); metric.set("cache.curator.evictionCount", curatorCacheStats.evictionCount(), null); metric.set("cache.curator.size", curatorCacheStats.size(), null); } private void updateMaintenanceMetrics() { metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null); } private void updateNodeMetrics(Node node, ServiceModel serviceModel) { Metric.Context context; Optional<Allocation> allocation = node.allocation(); if (allocation.isPresent()) { ApplicationId applicationId = allocation.get().owner(); Map<String, String> dimensions = new HashMap<>(dimensions(applicationId)); dimensions.put("state", node.state().name()); dimensions.put("host", node.hostname()); dimensions.put("clustertype", allocation.get().membership().cluster().type().name()); dimensions.put("clusterid", allocation.get().membership().cluster().id().value()); context = getContext(dimensions); long wantedRestartGeneration = allocation.get().restartGeneration().wanted(); metric.set("wantedRestartGeneration", wantedRestartGeneration, context); long currentRestartGeneration = allocation.get().restartGeneration().current(); metric.set("currentRestartGeneration", currentRestartGeneration, context); boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration; metric.set("wantToRestart", wantToRestart ? 1 : 0, context); metric.set("retired", allocation.get().membership().retired() ? 
1 : 0, context); Version wantedVersion = allocation.get().membership().cluster().vespaVersion(); double wantedVersionNumber = getVersionAsNumber(wantedVersion); metric.set("wantedVespaVersion", wantedVersionNumber, context); Optional<Version> currentVersion = node.status().vespaVersion(); boolean converged = currentVersion.isPresent() && currentVersion.get().equals(wantedVersion); metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context); } else { context = getContext(Map.of("state", node.state().name(), "host", node.hostname())); } Optional<Version> currentVersion = node.status().vespaVersion(); if (currentVersion.isPresent()) { double currentVersionNumber = getVersionAsNumber(currentVersion.get()); metric.set("currentVespaVersion", currentVersionNumber, context); } long wantedRebootGeneration = node.status().reboot().wanted(); metric.set("wantedRebootGeneration", wantedRebootGeneration, context); long currentRebootGeneration = node.status().reboot().current(); metric.set("currentRebootGeneration", currentRebootGeneration, context); boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration; metric.set("wantToReboot", wantToReboot ? 1 : 0, context); metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context); metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context); metric.set("failReport", NodeFailer.reasonsToFailParentHost(node).isEmpty() ? 0 : 1, context); HostName hostname = new HostName(node.hostname()); serviceModel.getApplication(hostname) .map(ApplicationInstance::reference) .map(reference -> orchestrator.getHostInfo(reference, hostname)) .ifPresent(info -> { int suspended = info.status().isSuspended() ? 
1 : 0; metric.set("suspended", suspended, context); metric.set("allowedToBeDown", suspended, context); long suspendedSeconds = info.suspendedSince() .map(suspendedSince -> Duration.between(suspendedSince, clock().instant()).getSeconds()) .orElse(0L); metric.set("suspendedSeconds", suspendedSeconds, context); }); long numberOfServices; List<ServiceInstance> services = serviceModel.getServiceInstancesByHostName().get(hostname); if (services == null) { numberOfServices = 0; } else { Map<ServiceStatus, Long> servicesCount = services.stream().collect( Collectors.groupingBy(ServiceInstance::serviceStatus, Collectors.counting())); numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum(); metric.set( "numberOfServicesUp", servicesCount.getOrDefault(ServiceStatus.UP, 0L), context); metric.set( "numberOfServicesNotChecked", servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L), context); long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L); metric.set("numberOfServicesDown", numberOfServicesDown, context); metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context); boolean down = NodeHealthTracker.allDown(services); metric.set("nodeFailerBadNode", (down ? 1 : 0), context); boolean nodeDownInNodeRepo = node.history().event(History.Event.Type.down).isPresent(); metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context); } metric.set("numberOfServices", numberOfServices, context); } private static String toApp(ApplicationId applicationId) { return applicationId.application().value() + "." + applicationId.instance().value(); } /** * A version 6.163.20 will be returned as a number 163.020. The major * version can normally be inferred. As long as the micro version stays * below 1000 these numbers sort like Version. 
*/ private static double getVersionAsNumber(Version version) { return version.getMinor() + version.getMicro() / 1000.0; } private Metric.Context getContext(Map<String, String> dimensions) { return contextMap.computeIfAbsent(dimensions, metric::createContext); } private void updateNodeCountMetrics(NodeList nodes) { Map<State, List<Node>> nodesByState = nodes.nodeType(NodeType.tenant).asList().stream() .collect(Collectors.groupingBy(Node::state)); for (State state : State.values()) { List<Node> nodesInState = nodesByState.getOrDefault(state, List.of()); metric.set("hostedVespa." + state.name() + "Hosts", nodesInState.size(), null); } } private void updateLockMetrics() { LockStats.getGlobal().getLockMetricsByPath() .forEach((lockPath, lockMetrics) -> { Metric.Context context = getContext(Map.of("lockPath", lockPath)); metric.set("lockAttempt.acquire", lockMetrics.getAndResetAcquireCount(), context); metric.set("lockAttempt.acquireFailed", lockMetrics.getAndResetAcquireFailedCount(), context); metric.set("lockAttempt.acquireTimedOut", lockMetrics.getAndResetAcquireTimedOutCount(), context); metric.set("lockAttempt.locked", lockMetrics.getAndResetAcquireSucceededCount(), context); metric.set("lockAttempt.release", lockMetrics.getAndResetReleaseCount(), context); metric.set("lockAttempt.releaseFailed", lockMetrics.getAndResetReleaseFailedCount(), context); metric.set("lockAttempt.reentry", lockMetrics.getAndResetReentryCount(), context); metric.set("lockAttempt.deadlock", lockMetrics.getAndResetDeadlockCount(), context); metric.set("lockAttempt.nakedRelease", lockMetrics.getAndResetNakedReleaseCount(), context); metric.set("lockAttempt.acquireWithoutRelease", lockMetrics.getAndResetAcquireWithoutReleaseCount(), context); metric.set("lockAttempt.foreignRelease", lockMetrics.getAndResetForeignReleaseCount(), context); setLockLatencyMetrics("acquire", lockMetrics.getAndResetAcquireLatencyMetrics(), context); setLockLatencyMetrics("locked", 
lockMetrics.getAndResetLockedLatencyMetrics(), context); }); } private void setLockLatencyMetrics(String name, LatencyMetrics latencyMetrics, Metric.Context context) { metric.set("lockAttempt." + name + "Latency", latencyMetrics.latencySeconds(), context); metric.set("lockAttempt." + name + "MaxActiveLatency", latencyMetrics.maxActiveLatencySeconds(), context); metric.set("lockAttempt." + name + "Hz", latencyMetrics.startHz(), context); metric.set("lockAttempt." + name + "Load", latencyMetrics.load(), context); } private void updateDockerMetrics(NodeList nodes) { NodeResources totalCapacity = getCapacityTotal(nodes); metric.set("hostedVespa.docker.totalCapacityCpu", totalCapacity.vcpu(), null); metric.set("hostedVespa.docker.totalCapacityMem", totalCapacity.memoryGb(), null); metric.set("hostedVespa.docker.totalCapacityDisk", totalCapacity.diskGb(), null); NodeResources totalFreeCapacity = getFreeCapacityTotal(nodes); metric.set("hostedVespa.docker.freeCapacityCpu", totalFreeCapacity.vcpu(), null); metric.set("hostedVespa.docker.freeCapacityMem", totalFreeCapacity.memoryGb(), null); metric.set("hostedVespa.docker.freeCapacityDisk", totalFreeCapacity.diskGb(), null); } private void updateTenantUsageMetrics(NodeList nodes) { nodes.nodeType(NodeType.tenant).stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> node.allocation().get().owner())) .forEach( (applicationId, applicationNodes) -> { var allocatedCapacity = applicationNodes.stream() .map(node -> node.allocation().get().requestedResources().justNumbers()) .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add); var context = getContext(dimensions(applicationId)); metric.set("hostedVespa.docker.allocatedCapacityCpu", allocatedCapacity.vcpu(), context); metric.set("hostedVespa.docker.allocatedCapacityMem", allocatedCapacity.memoryGb(), context); metric.set("hostedVespa.docker.allocatedCapacityDisk", allocatedCapacity.diskGb(), context); } ); } private static 
Map<String, String> dimensions(ApplicationId application) { return Map.of("tenantName", application.tenant().value(), "applicationId", application.serializedForm().replace(':', '.'), "app", toApp(application)); } private static NodeResources getCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(host -> host.flavor().resources()) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add); } private static NodeResources getFreeCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(n -> freeCapacityOf(nodes, n)) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add); } private static NodeResources freeCapacityOf(NodeList nodes, Node dockerHost) { return nodes.childrenOf(dockerHost).asList().stream() .map(node -> node.flavor().resources().justNumbers()) .reduce(dockerHost.flavor().resources().justNumbers(), NodeResources::subtract); } private static class ClusterKey { private final ApplicationId application; private final ClusterSpec.Id cluster; public ClusterKey(ApplicationId application, ClusterSpec.Id cluster) { this.application = application; this.cluster = cluster; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClusterKey that = (ClusterKey) o; return application.equals(that.application) && cluster.equals(that.cluster); } @Override public int hashCode() { return Objects.hash(application, cluster); } } }
class MetricsReporter extends NodeRepositoryMaintainer { private final Metric metric; private final Orchestrator orchestrator; private final ServiceMonitor serviceMonitor; private final Map<Map<String, String>, Metric.Context> contextMap = new HashMap<>(); private final Supplier<Integer> pendingRedeploymentsSupplier; MetricsReporter(NodeRepository nodeRepository, Metric metric, Orchestrator orchestrator, ServiceMonitor serviceMonitor, Supplier<Integer> pendingRedeploymentsSupplier, Duration interval) { super(nodeRepository, interval, metric); this.metric = metric; this.orchestrator = orchestrator; this.serviceMonitor = serviceMonitor; this.pendingRedeploymentsSupplier = pendingRedeploymentsSupplier; } @Override public boolean maintain() { NodeList nodes = nodeRepository().list(); ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot(); updateZoneMetrics(); updateCacheMetrics(); updateMaintenanceMetrics(); nodes.forEach(node -> updateNodeMetrics(node, serviceModel)); updateNodeCountMetrics(nodes); updateLockMetrics(); updateDockerMetrics(nodes); updateTenantUsageMetrics(nodes); updateRepairTicketMetrics(nodes); updateAllocationMetrics(nodes); return true; } private void updateZoneMetrics() { metric.set("zone.working", nodeRepository().isWorking() ? 
1 : 0, null); } private void updateCacheMetrics() { CacheStats nodeCacheStats = nodeRepository().database().nodeSerializerCacheStats(); metric.set("cache.nodeObject.hitRate", nodeCacheStats.hitRate(), null); metric.set("cache.nodeObject.evictionCount", nodeCacheStats.evictionCount(), null); metric.set("cache.nodeObject.size", nodeCacheStats.size(), null); CacheStats curatorCacheStats = nodeRepository().database().cacheStats(); metric.set("cache.curator.hitRate", curatorCacheStats.hitRate(), null); metric.set("cache.curator.evictionCount", curatorCacheStats.evictionCount(), null); metric.set("cache.curator.size", curatorCacheStats.size(), null); } private void updateMaintenanceMetrics() { metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null); } private void updateNodeMetrics(Node node, ServiceModel serviceModel) { Metric.Context context; Optional<Allocation> allocation = node.allocation(); if (allocation.isPresent()) { ApplicationId applicationId = allocation.get().owner(); Map<String, String> dimensions = new HashMap<>(dimensions(applicationId)); dimensions.put("state", node.state().name()); dimensions.put("host", node.hostname()); dimensions.put("clustertype", allocation.get().membership().cluster().type().name()); dimensions.put("clusterid", allocation.get().membership().cluster().id().value()); context = getContext(dimensions); long wantedRestartGeneration = allocation.get().restartGeneration().wanted(); metric.set("wantedRestartGeneration", wantedRestartGeneration, context); long currentRestartGeneration = allocation.get().restartGeneration().current(); metric.set("currentRestartGeneration", currentRestartGeneration, context); boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration; metric.set("wantToRestart", wantToRestart ? 1 : 0, context); metric.set("retired", allocation.get().membership().retired() ? 
1 : 0, context); Version wantedVersion = allocation.get().membership().cluster().vespaVersion(); double wantedVersionNumber = getVersionAsNumber(wantedVersion); metric.set("wantedVespaVersion", wantedVersionNumber, context); Optional<Version> currentVersion = node.status().vespaVersion(); boolean converged = currentVersion.isPresent() && currentVersion.get().equals(wantedVersion); metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context); } else { context = getContext(Map.of("state", node.state().name(), "host", node.hostname())); } Optional<Version> currentVersion = node.status().vespaVersion(); if (currentVersion.isPresent()) { double currentVersionNumber = getVersionAsNumber(currentVersion.get()); metric.set("currentVespaVersion", currentVersionNumber, context); } long wantedRebootGeneration = node.status().reboot().wanted(); metric.set("wantedRebootGeneration", wantedRebootGeneration, context); long currentRebootGeneration = node.status().reboot().current(); metric.set("currentRebootGeneration", currentRebootGeneration, context); boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration; metric.set("wantToReboot", wantToReboot ? 1 : 0, context); metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context); metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context); metric.set("failReport", NodeFailer.reasonsToFailParentHost(node).isEmpty() ? 0 : 1, context); HostName hostname = new HostName(node.hostname()); serviceModel.getApplication(hostname) .map(ApplicationInstance::reference) .map(reference -> orchestrator.getHostInfo(reference, hostname)) .ifPresent(info -> { int suspended = info.status().isSuspended() ? 
1 : 0; metric.set("suspended", suspended, context); metric.set("allowedToBeDown", suspended, context); long suspendedSeconds = info.suspendedSince() .map(suspendedSince -> Duration.between(suspendedSince, clock().instant()).getSeconds()) .orElse(0L); metric.set("suspendedSeconds", suspendedSeconds, context); }); long numberOfServices; List<ServiceInstance> services = serviceModel.getServiceInstancesByHostName().get(hostname); if (services == null) { numberOfServices = 0; } else { Map<ServiceStatus, Long> servicesCount = services.stream().collect( Collectors.groupingBy(ServiceInstance::serviceStatus, Collectors.counting())); numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum(); metric.set( "numberOfServicesUp", servicesCount.getOrDefault(ServiceStatus.UP, 0L), context); metric.set( "numberOfServicesNotChecked", servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L), context); long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L); metric.set("numberOfServicesDown", numberOfServicesDown, context); metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context); boolean down = NodeHealthTracker.allDown(services); metric.set("nodeFailerBadNode", (down ? 1 : 0), context); boolean nodeDownInNodeRepo = node.history().event(History.Event.Type.down).isPresent(); metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context); } metric.set("numberOfServices", numberOfServices, context); } private static String toApp(ApplicationId applicationId) { return applicationId.application().value() + "." + applicationId.instance().value(); } /** * A version 6.163.20 will be returned as a number 163.020. The major * version can normally be inferred. As long as the micro version stays * below 1000 these numbers sort like Version. 
*/ private static double getVersionAsNumber(Version version) { return version.getMinor() + version.getMicro() / 1000.0; } private Metric.Context getContext(Map<String, String> dimensions) { return contextMap.computeIfAbsent(dimensions, metric::createContext); } private void updateNodeCountMetrics(NodeList nodes) { Map<State, List<Node>> nodesByState = nodes.nodeType(NodeType.tenant).asList().stream() .collect(Collectors.groupingBy(Node::state)); for (State state : State.values()) { List<Node> nodesInState = nodesByState.getOrDefault(state, List.of()); metric.set("hostedVespa." + state.name() + "Hosts", nodesInState.size(), null); } } private void updateLockMetrics() { LockStats.getGlobal().getLockMetricsByPath() .forEach((lockPath, lockMetrics) -> { Metric.Context context = getContext(Map.of("lockPath", lockPath)); metric.set("lockAttempt.acquire", lockMetrics.getAndResetAcquireCount(), context); metric.set("lockAttempt.acquireFailed", lockMetrics.getAndResetAcquireFailedCount(), context); metric.set("lockAttempt.acquireTimedOut", lockMetrics.getAndResetAcquireTimedOutCount(), context); metric.set("lockAttempt.locked", lockMetrics.getAndResetAcquireSucceededCount(), context); metric.set("lockAttempt.release", lockMetrics.getAndResetReleaseCount(), context); metric.set("lockAttempt.releaseFailed", lockMetrics.getAndResetReleaseFailedCount(), context); metric.set("lockAttempt.reentry", lockMetrics.getAndResetReentryCount(), context); metric.set("lockAttempt.deadlock", lockMetrics.getAndResetDeadlockCount(), context); metric.set("lockAttempt.nakedRelease", lockMetrics.getAndResetNakedReleaseCount(), context); metric.set("lockAttempt.acquireWithoutRelease", lockMetrics.getAndResetAcquireWithoutReleaseCount(), context); metric.set("lockAttempt.foreignRelease", lockMetrics.getAndResetForeignReleaseCount(), context); setLockLatencyMetrics("acquire", lockMetrics.getAndResetAcquireLatencyMetrics(), context); setLockLatencyMetrics("locked", 
lockMetrics.getAndResetLockedLatencyMetrics(), context); }); } private void setLockLatencyMetrics(String name, LatencyMetrics latencyMetrics, Metric.Context context) { metric.set("lockAttempt." + name + "Latency", latencyMetrics.latencySeconds(), context); metric.set("lockAttempt." + name + "MaxActiveLatency", latencyMetrics.maxActiveLatencySeconds(), context); metric.set("lockAttempt." + name + "Hz", latencyMetrics.startHz(), context); metric.set("lockAttempt." + name + "Load", latencyMetrics.load(), context); } private void updateDockerMetrics(NodeList nodes) { NodeResources totalCapacity = getCapacityTotal(nodes); metric.set("hostedVespa.docker.totalCapacityCpu", totalCapacity.vcpu(), null); metric.set("hostedVespa.docker.totalCapacityMem", totalCapacity.memoryGb(), null); metric.set("hostedVespa.docker.totalCapacityDisk", totalCapacity.diskGb(), null); NodeResources totalFreeCapacity = getFreeCapacityTotal(nodes); metric.set("hostedVespa.docker.freeCapacityCpu", totalFreeCapacity.vcpu(), null); metric.set("hostedVespa.docker.freeCapacityMem", totalFreeCapacity.memoryGb(), null); metric.set("hostedVespa.docker.freeCapacityDisk", totalFreeCapacity.diskGb(), null); } private void updateTenantUsageMetrics(NodeList nodes) { nodes.nodeType(NodeType.tenant).stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> node.allocation().get().owner())) .forEach( (applicationId, applicationNodes) -> { var allocatedCapacity = applicationNodes.stream() .map(node -> node.allocation().get().requestedResources().justNumbers()) .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add); var context = getContext(dimensions(applicationId)); metric.set("hostedVespa.docker.allocatedCapacityCpu", allocatedCapacity.vcpu(), context); metric.set("hostedVespa.docker.allocatedCapacityMem", allocatedCapacity.memoryGb(), context); metric.set("hostedVespa.docker.allocatedCapacityDisk", allocatedCapacity.diskGb(), context); } ); } private void 
updateRepairTicketMetrics(NodeList nodes) { nodes.nodeType(NodeType.host).stream() .map(node -> node.reports().getReport("repairTicket")) .flatMap(Optional::stream) .map(report -> report.getInspector().field("status").asString()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())) .forEach((status, number) -> metric.set("hostedVespa.breakfixedHosts", number, getContext(Map.of("status", status)))); } private static Map<String, String> dimensions(ApplicationId application) { return Map.of("tenantName", application.tenant().value(), "applicationId", application.serializedForm().replace(':', '.'), "app", toApp(application)); } private static NodeResources getCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(host -> host.flavor().resources()) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add); } private static NodeResources getFreeCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(n -> freeCapacityOf(nodes, n)) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add); } private static NodeResources freeCapacityOf(NodeList nodes, Node dockerHost) { return nodes.childrenOf(dockerHost).asList().stream() .map(node -> node.flavor().resources().justNumbers()) .reduce(dockerHost.flavor().resources().justNumbers(), NodeResources::subtract); } private static class ClusterKey { private final ApplicationId application; private final ClusterSpec.Id cluster; public ClusterKey(ApplicationId application, ClusterSpec.Id cluster) { this.application = application; this.cluster = cluster; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClusterKey that = (ClusterKey) o; return application.equals(that.application) && cluster.equals(that.cluster); } @Override public int hashCode() { return 
Objects.hash(application, cluster); } } }
What about total cost/additional line items?
private HttpResponse getAllInvoices() { var invoices = billingController.getInvoices(); var headers = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk" }; var rows = invoices.stream() .map(invoice -> { return new Object[] { invoice.id().value(), invoice.tenant().value(), invoice.getStartTime().format(DateTimeFormatter.ISO_LOCAL_DATE), invoice.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE), invoice.sumCpuHours(), invoice.sumMemoryHours(), invoice.sumDiskHours(), invoice.sumCpuCost(), invoice.sumMemoryCost(), invoice.sumDiskCost() }; }) .collect(Collectors.toList()); return new CsvResponse(headers, rows); }
};
private HttpResponse getAllInvoices() { var invoices = billingController.getInvoices(); var headers = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk", "Additional" }; var rows = invoices.stream() .map(invoice -> { return new Object[] { invoice.id().value(), invoice.tenant().value(), invoice.getStartTime().format(DateTimeFormatter.ISO_LOCAL_DATE), invoice.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE), invoice.sumCpuHours(), invoice.sumMemoryHours(), invoice.sumDiskHours(), invoice.sumCpuCost(), invoice.sumMemoryCost(), invoice.sumDiskCost(), invoice.sumAdditionalCost() }; }) .collect(Collectors.toList()); return new CsvResponse(headers, rows); }
class BillingApiHandler extends LoggingRequestHandler { private static final String OPTIONAL_PREFIX = "/api"; private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd"); private final BillingController billingController; private final ApplicationController applicationController; private final TenantController tenantController; public BillingApiHandler(Executor executor, AccessLog accessLog, Controller controller) { super(executor, accessLog); this.billingController = controller.serviceRegistry().billingController(); this.applicationController = controller.applications(); this.tenantController = controller.tenants(); } @Override public HttpResponse handle(HttpRequest request) { try { Path path = new Path(request.getUri(), OPTIONAL_PREFIX); String userId = userIdOrThrow(request); switch (request.getMethod()) { case GET: return handleGET(request, path, userId); case PATCH: return handlePATCH(request, path, userId); case DELETE: return handleDELETE(path, userId); case POST: return handlePOST(path, request, userId); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (NotFoundException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (Exception e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError("Internal problem while handling billing API request"); } } private HttpResponse handleGET(HttpRequest request, Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/token")) return getToken(path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/instrument")) return getInstruments(path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/billing")) return getBilling(path.get("tenant"), 
request.getProperty("until")); if (path.matches("/billing/v1/tenant/{tenant}/plan")) return getPlan(path.get("tenant")); if (path.matches("/billing/v1/billing")) return getBillingAllTenants(request.getProperty("until")); if (path.matches("/billing/v1/invoice/export")) return getAllInvoices(); if (path.matches("/billing/v1/invoice/tenant/{tenant}/line-item")) return getLineItems(path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(HttpRequest request, Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/instrument")) return patchActiveInstrument(request, path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/plan")) return patchPlan(request, path.get("tenant")); if (path.matches("/billing/v1/tenant/{tenant}/collection")) return patchCollectionMethod(request, path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/instrument/{instrument}")) return deleteInstrument(path.get("tenant"), userId, path.get("instrument")); if (path.matches("/billing/v1/invoice/line-item/{line-item-id}")) return deleteLineItem(path.get("line-item-id")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request, String userId) { if (path.matches("/billing/v1/invoice")) return createInvoice(request, userId); if (path.matches("/billing/v1/invoice/{invoice-id}/status")) return setInvoiceStatus(request, path.get("invoice-id")); if (path.matches("/billing/v1/invoice/tenant/{tenant}/line-item")) return addLineItem(request, path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse getPlan(String tenant) { var plan = billingController.getPlan(TenantName.from(tenant)); var slime = new Slime(); var root = slime.setObject(); root.setString("tenant", tenant); 
root.setString("plan", plan.value()); return new SlimeJsonResponse(slime); } private HttpResponse patchPlan(HttpRequest request, String tenant) { var tenantName = TenantName.from(tenant); var slime = inspectorOrThrow(request); var planId = PlanId.from(slime.field("plan").asString()); var hasDeployments = hasDeployments(tenantName); var result = billingController.setPlan(tenantName, planId, hasDeployments); if (result.isSuccess()) return new StringResponse("Plan: " + planId.value()); return ErrorResponse.forbidden(result.getErrorMessage().orElse("Invalid plan change")); } private HttpResponse patchCollectionMethod(HttpRequest request, String tenant) { var tenantName = TenantName.from(tenant); var slime = inspectorOrThrow(request); var newMethod = slime.field("collection").valid() ? slime.field("collection").asString().toUpperCase() : slime.field("collectionMethod").asString().toUpperCase(); if (newMethod.isEmpty()) return ErrorResponse.badRequest("No collection method specified"); try { var result = billingController.setCollectionMethod(tenantName, CollectionMethod.valueOf(newMethod)); if (result.isSuccess()) return new StringResponse("Collection method updated to " + newMethod); return ErrorResponse.forbidden(result.getErrorMessage().orElse("Invalid collection method change")); } catch (IllegalArgumentException iea){ return ErrorResponse.badRequest("Invalid collection method: " + newMethod); } } private HttpResponse getBillingAllTenants(String until) { try { var untilDate = untilParameter(until); var uncommittedInvoices = billingController.createUncommittedInvoices(untilDate); var slime = new Slime(); var root = slime.setObject(); root.setString("until", untilDate.format(DateTimeFormatter.ISO_DATE)); var tenants = root.setArray("tenants"); tenantController.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> { var invoice = uncommittedInvoices.get(tenant.name()); var tc = tenants.addObject(); tc.setString("tenant", tenant.name().value()); 
getPlanForTenant(tc, tenant.name()); getCollectionForTenant(tc, tenant.name()); renderCurrentUsage(tc.setObject("current"), invoice); renderAdditionalItems(tc.setObject("additional").setArray("items"), billingController.getUnusedLineItems(tenant.name())); billingController.getDefaultInstrument(tenant.name()).ifPresent(card -> renderInstrument(tc.setObject("payment"), card) ); }); return new SlimeJsonResponse(slime); } catch (DateTimeParseException e) { return ErrorResponse.badRequest("Could not parse date: " + until); } } private void getCollectionForTenant(Cursor tc, TenantName tenant) { var collection = billingController.getCollectionMethod(tenant); tc.setString("collection", collection.name()); } private HttpResponse addLineItem(HttpRequest request, String tenant) { Inspector inspector = inspectorOrThrow(request); billingController.addLineItem( TenantName.from(tenant), getInspectorFieldOrThrow(inspector, "description"), new BigDecimal(getInspectorFieldOrThrow(inspector, "amount")), userIdOrThrow(request)); return new MessageResponse("Added line item for tenant " + tenant); } private HttpResponse setInvoiceStatus(HttpRequest request, String invoiceId) { Inspector inspector = inspectorOrThrow(request); String status = getInspectorFieldOrThrow(inspector, "status"); billingController.updateInvoiceStatus(Invoice.Id.of(invoiceId), userIdOrThrow(request), status); return new MessageResponse("Updated status of invoice " + invoiceId); } private HttpResponse createInvoice(HttpRequest request, String userId) { Inspector inspector = inspectorOrThrow(request); TenantName tenantName = TenantName.from(getInspectorFieldOrThrow(inspector, "tenant")); LocalDate startDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "startTime")); LocalDate endDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "endTime")); ZonedDateTime startTime = startDate.atStartOfDay(ZoneId.of("UTC")); ZonedDateTime endTime = endDate.atStartOfDay(ZoneId.of("UTC")); var invoiceId = 
billingController.createInvoiceForPeriod(tenantName, startTime, endTime, userId); return new MessageResponse("Created invoice with ID " + invoiceId.value()); } private HttpResponse getInstruments(String tenant, String userId) { var instrumentListResponse = billingController.listInstruments(TenantName.from(tenant), userId); return new JacksonJsonResponse<>(200, instrumentListResponse); } private HttpResponse getToken(String tenant, String userId) { return new StringResponse(billingController.createClientToken(tenant, userId)); } private HttpResponse getBilling(String tenant, String until) { try { var untilDate = untilParameter(until); var tenantId = TenantName.from(tenant); var slimeResponse = new Slime(); var root = slimeResponse.setObject(); root.setString("until", untilDate.format(DateTimeFormatter.ISO_DATE)); getPlanForTenant(root, tenantId); renderCurrentUsage(root.setObject("current"), getCurrentUsageForTenant(tenantId, untilDate)); renderAdditionalItems(root.setObject("additional").setArray("items"), billingController.getUnusedLineItems(tenantId)); renderInvoices(root.setArray("bills"), getInvoicesForTenant(tenantId)); billingController.getDefaultInstrument(tenantId).ifPresent( card -> renderInstrument(root.setObject("payment"), card) ); root.setString("collection", billingController.getCollectionMethod(tenantId).name()); return new SlimeJsonResponse(slimeResponse); } catch (DateTimeParseException e) { return ErrorResponse.badRequest("Could not parse date: " + until); } } private HttpResponse getLineItems(String tenant) { var slimeResponse = new Slime(); var root = slimeResponse.setObject(); var lineItems = root.setArray("lineItems"); billingController.getUnusedLineItems(TenantName.from(tenant)) .forEach(lineItem -> { var itemCursor = lineItems.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); return new SlimeJsonResponse(slimeResponse); } private void getPlanForTenant(Cursor cursor, TenantName tenant) { PlanId plan = 
billingController.getPlan(tenant); cursor.setString("plan", plan.value()); cursor.setString("planName", billingController.getPlanDisplayName(plan)); } private void renderInstrument(Cursor cursor, PaymentInstrument instrument) { cursor.setString("pi-id", instrument.getId()); cursor.setString("type", instrument.getType()); cursor.setString("brand", instrument.getBrand()); cursor.setString("endingWith", instrument.getEndingWith()); cursor.setString("expiryDate", instrument.getExpiryDate()); cursor.setString("displayText", instrument.getDisplayText()); cursor.setString("nameOnCard", instrument.getNameOnCard()); cursor.setString("addressLine1", instrument.getAddressLine1()); cursor.setString("addressLine2", instrument.getAddressLine2()); cursor.setString("zip", instrument.getZip()); cursor.setString("city", instrument.getCity()); cursor.setString("state", instrument.getState()); cursor.setString("country", instrument.getCountry()); } private void renderCurrentUsage(Cursor cursor, Invoice currentUsage) { if (currentUsage == null) return; cursor.setString("amount", currentUsage.sum().toPlainString()); cursor.setString("status", "accrued"); cursor.setString("from", currentUsage.getStartTime().format(DATE_TIME_FORMATTER)); var itemsCursor = cursor.setArray("items"); currentUsage.lineItems().forEach(lineItem -> { var itemCursor = itemsCursor.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); } private void renderAdditionalItems(Cursor cursor, List<Invoice.LineItem> items) { items.forEach(item -> { renderLineItemToCursor(cursor.addObject(), item); }); } private Invoice getCurrentUsageForTenant(TenantName tenant, LocalDate until) { return billingController.createUncommittedInvoice(tenant, until); } private List<Invoice> getInvoicesForTenant(TenantName tenant) { return billingController.getInvoicesForTenant(tenant); } private void renderInvoices(Cursor cursor, List<Invoice> invoices) { invoices.forEach(invoice -> { var invoiceCursor = cursor.addObject(); 
renderInvoiceToCursor(invoiceCursor, invoice); }); } private void renderInvoiceToCursor(Cursor invoiceCursor, Invoice invoice) { invoiceCursor.setString("id", invoice.id().value()); invoiceCursor.setString("from", invoice.getStartTime().format(DATE_TIME_FORMATTER)); invoiceCursor.setString("to", invoice.getEndTime().format(DATE_TIME_FORMATTER)); invoiceCursor.setString("amount", invoice.sum().toString()); invoiceCursor.setString("status", invoice.status()); var statusCursor = invoiceCursor.setArray("statusHistory"); renderStatusHistory(statusCursor, invoice.statusHistory()); var lineItemsCursor = invoiceCursor.setArray("items"); invoice.lineItems().forEach(lineItem -> { var itemCursor = lineItemsCursor.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); } private void renderStatusHistory(Cursor cursor, Invoice.StatusHistory statusHistory) { statusHistory.getHistory() .entrySet() .stream() .forEach(entry -> { var c = cursor.addObject(); c.setString("at", entry.getKey().format(DATE_TIME_FORMATTER)); c.setString("status", entry.getValue()); }); } private void renderLineItemToCursor(Cursor cursor, Invoice.LineItem lineItem) { cursor.setString("id", lineItem.id()); cursor.setString("description", lineItem.description()); cursor.setString("amount", lineItem.amount().toString()); cursor.setString("plan", lineItem.plan()); cursor.setString("planName", billingController.getPlanDisplayName(PlanId.from(lineItem.plan()))); lineItem.applicationId().ifPresent(appId -> { cursor.setString("application", appId.application().value()); cursor.setString("instance", appId.instance().value()); }); lineItem.zoneId().ifPresent(zoneId -> cursor.setString("zone", zoneId.value()) ); lineItem.getCpuHours().ifPresent(cpuHours -> cursor.setString("cpuHours", cpuHours.toString()) ); lineItem.getMemoryHours().ifPresent(memoryHours -> cursor.setString("memoryHours", memoryHours.toString()) ); lineItem.getDiskHours().ifPresent(diskHours -> cursor.setString("diskHours", 
diskHours.toString()) ); lineItem.getCpuCost().ifPresent(cpuCost -> cursor.setString("cpuCost", cpuCost.toString()) ); lineItem.getMemoryCost().ifPresent(memoryCost -> cursor.setString("memoryCost", memoryCost.toString()) ); lineItem.getDiskCost().ifPresent(diskCost -> cursor.setString("diskCost", diskCost.toString()) ); } private HttpResponse deleteInstrument(String tenant, String userId, String instrument) { if (billingController.deleteInstrument(TenantName.from(tenant), userId, instrument)) { return new StringResponse("OK"); } else { return ErrorResponse.forbidden("Cannot delete payment instrument you don't own"); } } private HttpResponse deleteLineItem(String lineItemId) { billingController.deleteLineItem(lineItemId); return new MessageResponse("Succesfully deleted line item " + lineItemId); } private HttpResponse patchActiveInstrument(HttpRequest request, String tenant, String userId) { var inspector = inspectorOrThrow(request); String instrumentId = getInspectorFieldOrThrow(inspector, "active"); InstrumentOwner paymentInstrument = new InstrumentOwner(TenantName.from(tenant), userId, instrumentId, true); boolean success = billingController.setActivePaymentInstrument(paymentInstrument); return success ? 
new StringResponse("OK") : ErrorResponse.internalServerError("Failed to patch active instrument"); } private Inspector inspectorOrThrow(HttpRequest request) { try { return SlimeUtils.jsonToSlime(request.getData().readAllBytes()).get(); } catch (IOException e) { throw new BadRequestException("Failed to parse request body"); } } private static String userIdOrThrow(HttpRequest request) { return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal()) .map(Principal::getName) .orElseThrow(() -> new ForbiddenException("Must be authenticated to use this API")); } private static String getInspectorFieldOrThrow(Inspector inspector, String field) { if (!inspector.field(field).valid()) throw new BadRequestException("Field " + field + " cannot be null"); return inspector.field(field).asString(); } private LocalDate untilParameter(String until) { if (until == null || until.isEmpty() || until.isBlank()) return LocalDate.now().plusDays(1); return LocalDate.parse(until); } private boolean hasDeployments(TenantName tenantName) { return applicationController.asList(tenantName) .stream() .flatMap(app -> app.instances().values() .stream() .flatMap(instance -> instance.deployments().values().stream()) ) .count() > 0; } private static class CsvResponse extends HttpResponse { private final String[] header; private final List<Object[]> rows; CsvResponse(String[] header, List<Object[]> rows) { super(200); this.header = header; this.rows = rows; } @Override public void render(OutputStream outputStream) throws IOException { var writer = new OutputStreamWriter(outputStream); var printer = CSVFormat.DEFAULT.withRecordSeparator('\n').withHeader(this.header).print(writer); for (var row : this.rows) printer.printRecord(row); printer.flush(); } @Override public String getContentType() { return "text/csv; encoding=utf-8"; } } }
class BillingApiHandler extends LoggingRequestHandler { private static final String OPTIONAL_PREFIX = "/api"; private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd"); private final BillingController billingController; private final ApplicationController applicationController; private final TenantController tenantController; public BillingApiHandler(Executor executor, AccessLog accessLog, Controller controller) { super(executor, accessLog); this.billingController = controller.serviceRegistry().billingController(); this.applicationController = controller.applications(); this.tenantController = controller.tenants(); } @Override public HttpResponse handle(HttpRequest request) { try { Path path = new Path(request.getUri(), OPTIONAL_PREFIX); String userId = userIdOrThrow(request); switch (request.getMethod()) { case GET: return handleGET(request, path, userId); case PATCH: return handlePATCH(request, path, userId); case DELETE: return handleDELETE(path, userId); case POST: return handlePOST(path, request, userId); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (NotFoundException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (Exception e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError("Internal problem while handling billing API request"); } } private HttpResponse handleGET(HttpRequest request, Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/token")) return getToken(path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/instrument")) return getInstruments(path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/billing")) return getBilling(path.get("tenant"), 
request.getProperty("until")); if (path.matches("/billing/v1/tenant/{tenant}/plan")) return getPlan(path.get("tenant")); if (path.matches("/billing/v1/billing")) return getBillingAllTenants(request.getProperty("until")); if (path.matches("/billing/v1/invoice/export")) return getAllInvoices(); if (path.matches("/billing/v1/invoice/tenant/{tenant}/line-item")) return getLineItems(path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(HttpRequest request, Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/instrument")) return patchActiveInstrument(request, path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/plan")) return patchPlan(request, path.get("tenant")); if (path.matches("/billing/v1/tenant/{tenant}/collection")) return patchCollectionMethod(request, path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/instrument/{instrument}")) return deleteInstrument(path.get("tenant"), userId, path.get("instrument")); if (path.matches("/billing/v1/invoice/line-item/{line-item-id}")) return deleteLineItem(path.get("line-item-id")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request, String userId) { if (path.matches("/billing/v1/invoice")) return createInvoice(request, userId); if (path.matches("/billing/v1/invoice/{invoice-id}/status")) return setInvoiceStatus(request, path.get("invoice-id")); if (path.matches("/billing/v1/invoice/tenant/{tenant}/line-item")) return addLineItem(request, path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse getPlan(String tenant) { var plan = billingController.getPlan(TenantName.from(tenant)); var slime = new Slime(); var root = slime.setObject(); root.setString("tenant", tenant); 
root.setString("plan", plan.value()); return new SlimeJsonResponse(slime); } private HttpResponse patchPlan(HttpRequest request, String tenant) { var tenantName = TenantName.from(tenant); var slime = inspectorOrThrow(request); var planId = PlanId.from(slime.field("plan").asString()); var hasDeployments = hasDeployments(tenantName); var result = billingController.setPlan(tenantName, planId, hasDeployments); if (result.isSuccess()) return new StringResponse("Plan: " + planId.value()); return ErrorResponse.forbidden(result.getErrorMessage().orElse("Invalid plan change")); } private HttpResponse patchCollectionMethod(HttpRequest request, String tenant) { var tenantName = TenantName.from(tenant); var slime = inspectorOrThrow(request); var newMethod = slime.field("collection").valid() ? slime.field("collection").asString().toUpperCase() : slime.field("collectionMethod").asString().toUpperCase(); if (newMethod.isEmpty()) return ErrorResponse.badRequest("No collection method specified"); try { var result = billingController.setCollectionMethod(tenantName, CollectionMethod.valueOf(newMethod)); if (result.isSuccess()) return new StringResponse("Collection method updated to " + newMethod); return ErrorResponse.forbidden(result.getErrorMessage().orElse("Invalid collection method change")); } catch (IllegalArgumentException iea){ return ErrorResponse.badRequest("Invalid collection method: " + newMethod); } } private HttpResponse getBillingAllTenants(String until) { try { var untilDate = untilParameter(until); var uncommittedInvoices = billingController.createUncommittedInvoices(untilDate); var slime = new Slime(); var root = slime.setObject(); root.setString("until", untilDate.format(DateTimeFormatter.ISO_DATE)); var tenants = root.setArray("tenants"); tenantController.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> { var invoice = uncommittedInvoices.get(tenant.name()); var tc = tenants.addObject(); tc.setString("tenant", tenant.name().value()); 
getPlanForTenant(tc, tenant.name()); getCollectionForTenant(tc, tenant.name()); renderCurrentUsage(tc.setObject("current"), invoice); renderAdditionalItems(tc.setObject("additional").setArray("items"), billingController.getUnusedLineItems(tenant.name())); billingController.getDefaultInstrument(tenant.name()).ifPresent(card -> renderInstrument(tc.setObject("payment"), card) ); }); return new SlimeJsonResponse(slime); } catch (DateTimeParseException e) { return ErrorResponse.badRequest("Could not parse date: " + until); } } private void getCollectionForTenant(Cursor tc, TenantName tenant) { var collection = billingController.getCollectionMethod(tenant); tc.setString("collection", collection.name()); } private HttpResponse addLineItem(HttpRequest request, String tenant) { Inspector inspector = inspectorOrThrow(request); billingController.addLineItem( TenantName.from(tenant), getInspectorFieldOrThrow(inspector, "description"), new BigDecimal(getInspectorFieldOrThrow(inspector, "amount")), userIdOrThrow(request)); return new MessageResponse("Added line item for tenant " + tenant); } private HttpResponse setInvoiceStatus(HttpRequest request, String invoiceId) { Inspector inspector = inspectorOrThrow(request); String status = getInspectorFieldOrThrow(inspector, "status"); billingController.updateInvoiceStatus(Invoice.Id.of(invoiceId), userIdOrThrow(request), status); return new MessageResponse("Updated status of invoice " + invoiceId); } private HttpResponse createInvoice(HttpRequest request, String userId) { Inspector inspector = inspectorOrThrow(request); TenantName tenantName = TenantName.from(getInspectorFieldOrThrow(inspector, "tenant")); LocalDate startDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "startTime")); LocalDate endDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "endTime")); ZonedDateTime startTime = startDate.atStartOfDay(ZoneId.of("UTC")); ZonedDateTime endTime = endDate.atStartOfDay(ZoneId.of("UTC")); var invoiceId = 
billingController.createInvoiceForPeriod(tenantName, startTime, endTime, userId); return new MessageResponse("Created invoice with ID " + invoiceId.value()); } private HttpResponse getInstruments(String tenant, String userId) { var instrumentListResponse = billingController.listInstruments(TenantName.from(tenant), userId); return new JacksonJsonResponse<>(200, instrumentListResponse); } private HttpResponse getToken(String tenant, String userId) { return new StringResponse(billingController.createClientToken(tenant, userId)); } private HttpResponse getBilling(String tenant, String until) { try { var untilDate = untilParameter(until); var tenantId = TenantName.from(tenant); var slimeResponse = new Slime(); var root = slimeResponse.setObject(); root.setString("until", untilDate.format(DateTimeFormatter.ISO_DATE)); getPlanForTenant(root, tenantId); renderCurrentUsage(root.setObject("current"), getCurrentUsageForTenant(tenantId, untilDate)); renderAdditionalItems(root.setObject("additional").setArray("items"), billingController.getUnusedLineItems(tenantId)); renderInvoices(root.setArray("bills"), getInvoicesForTenant(tenantId)); billingController.getDefaultInstrument(tenantId).ifPresent( card -> renderInstrument(root.setObject("payment"), card) ); root.setString("collection", billingController.getCollectionMethod(tenantId).name()); return new SlimeJsonResponse(slimeResponse); } catch (DateTimeParseException e) { return ErrorResponse.badRequest("Could not parse date: " + until); } } private HttpResponse getLineItems(String tenant) { var slimeResponse = new Slime(); var root = slimeResponse.setObject(); var lineItems = root.setArray("lineItems"); billingController.getUnusedLineItems(TenantName.from(tenant)) .forEach(lineItem -> { var itemCursor = lineItems.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); return new SlimeJsonResponse(slimeResponse); } private void getPlanForTenant(Cursor cursor, TenantName tenant) { PlanId plan = 
billingController.getPlan(tenant); cursor.setString("plan", plan.value()); cursor.setString("planName", billingController.getPlanDisplayName(plan)); } private void renderInstrument(Cursor cursor, PaymentInstrument instrument) { cursor.setString("pi-id", instrument.getId()); cursor.setString("type", instrument.getType()); cursor.setString("brand", instrument.getBrand()); cursor.setString("endingWith", instrument.getEndingWith()); cursor.setString("expiryDate", instrument.getExpiryDate()); cursor.setString("displayText", instrument.getDisplayText()); cursor.setString("nameOnCard", instrument.getNameOnCard()); cursor.setString("addressLine1", instrument.getAddressLine1()); cursor.setString("addressLine2", instrument.getAddressLine2()); cursor.setString("zip", instrument.getZip()); cursor.setString("city", instrument.getCity()); cursor.setString("state", instrument.getState()); cursor.setString("country", instrument.getCountry()); } private void renderCurrentUsage(Cursor cursor, Invoice currentUsage) { if (currentUsage == null) return; cursor.setString("amount", currentUsage.sum().toPlainString()); cursor.setString("status", "accrued"); cursor.setString("from", currentUsage.getStartTime().format(DATE_TIME_FORMATTER)); var itemsCursor = cursor.setArray("items"); currentUsage.lineItems().forEach(lineItem -> { var itemCursor = itemsCursor.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); } private void renderAdditionalItems(Cursor cursor, List<Invoice.LineItem> items) { items.forEach(item -> { renderLineItemToCursor(cursor.addObject(), item); }); } private Invoice getCurrentUsageForTenant(TenantName tenant, LocalDate until) { return billingController.createUncommittedInvoice(tenant, until); } private List<Invoice> getInvoicesForTenant(TenantName tenant) { return billingController.getInvoicesForTenant(tenant); } private void renderInvoices(Cursor cursor, List<Invoice> invoices) { invoices.forEach(invoice -> { var invoiceCursor = cursor.addObject(); 
renderInvoiceToCursor(invoiceCursor, invoice); }); } private void renderInvoiceToCursor(Cursor invoiceCursor, Invoice invoice) { invoiceCursor.setString("id", invoice.id().value()); invoiceCursor.setString("from", invoice.getStartTime().format(DATE_TIME_FORMATTER)); invoiceCursor.setString("to", invoice.getEndTime().format(DATE_TIME_FORMATTER)); invoiceCursor.setString("amount", invoice.sum().toString()); invoiceCursor.setString("status", invoice.status()); var statusCursor = invoiceCursor.setArray("statusHistory"); renderStatusHistory(statusCursor, invoice.statusHistory()); var lineItemsCursor = invoiceCursor.setArray("items"); invoice.lineItems().forEach(lineItem -> { var itemCursor = lineItemsCursor.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); } private void renderStatusHistory(Cursor cursor, Invoice.StatusHistory statusHistory) { statusHistory.getHistory() .entrySet() .stream() .forEach(entry -> { var c = cursor.addObject(); c.setString("at", entry.getKey().format(DATE_TIME_FORMATTER)); c.setString("status", entry.getValue()); }); } private void renderLineItemToCursor(Cursor cursor, Invoice.LineItem lineItem) { cursor.setString("id", lineItem.id()); cursor.setString("description", lineItem.description()); cursor.setString("amount", lineItem.amount().toString()); cursor.setString("plan", lineItem.plan()); cursor.setString("planName", billingController.getPlanDisplayName(PlanId.from(lineItem.plan()))); lineItem.applicationId().ifPresent(appId -> { cursor.setString("application", appId.application().value()); cursor.setString("instance", appId.instance().value()); }); lineItem.zoneId().ifPresent(zoneId -> cursor.setString("zone", zoneId.value()) ); lineItem.getCpuHours().ifPresent(cpuHours -> cursor.setString("cpuHours", cpuHours.toString()) ); lineItem.getMemoryHours().ifPresent(memoryHours -> cursor.setString("memoryHours", memoryHours.toString()) ); lineItem.getDiskHours().ifPresent(diskHours -> cursor.setString("diskHours", 
diskHours.toString()) ); lineItem.getCpuCost().ifPresent(cpuCost -> cursor.setString("cpuCost", cpuCost.toString()) ); lineItem.getMemoryCost().ifPresent(memoryCost -> cursor.setString("memoryCost", memoryCost.toString()) ); lineItem.getDiskCost().ifPresent(diskCost -> cursor.setString("diskCost", diskCost.toString()) ); } private HttpResponse deleteInstrument(String tenant, String userId, String instrument) { if (billingController.deleteInstrument(TenantName.from(tenant), userId, instrument)) { return new StringResponse("OK"); } else { return ErrorResponse.forbidden("Cannot delete payment instrument you don't own"); } } private HttpResponse deleteLineItem(String lineItemId) { billingController.deleteLineItem(lineItemId); return new MessageResponse("Succesfully deleted line item " + lineItemId); } private HttpResponse patchActiveInstrument(HttpRequest request, String tenant, String userId) { var inspector = inspectorOrThrow(request); String instrumentId = getInspectorFieldOrThrow(inspector, "active"); InstrumentOwner paymentInstrument = new InstrumentOwner(TenantName.from(tenant), userId, instrumentId, true); boolean success = billingController.setActivePaymentInstrument(paymentInstrument); return success ? 
new StringResponse("OK") : ErrorResponse.internalServerError("Failed to patch active instrument"); } private Inspector inspectorOrThrow(HttpRequest request) { try { return SlimeUtils.jsonToSlime(request.getData().readAllBytes()).get(); } catch (IOException e) { throw new BadRequestException("Failed to parse request body"); } } private static String userIdOrThrow(HttpRequest request) { return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal()) .map(Principal::getName) .orElseThrow(() -> new ForbiddenException("Must be authenticated to use this API")); } private static String getInspectorFieldOrThrow(Inspector inspector, String field) { if (!inspector.field(field).valid()) throw new BadRequestException("Field " + field + " cannot be null"); return inspector.field(field).asString(); } private LocalDate untilParameter(String until) { if (until == null || until.isEmpty() || until.isBlank()) return LocalDate.now().plusDays(1); return LocalDate.parse(until); } private boolean hasDeployments(TenantName tenantName) { return applicationController.asList(tenantName) .stream() .flatMap(app -> app.instances().values() .stream() .flatMap(instance -> instance.deployments().values().stream()) ) .count() > 0; } private static class CsvResponse extends HttpResponse { private final String[] header; private final List<Object[]> rows; CsvResponse(String[] header, List<Object[]> rows) { super(200); this.header = header; this.rows = rows; } @Override public void render(OutputStream outputStream) throws IOException { var writer = new OutputStreamWriter(outputStream); var printer = CSVFormat.DEFAULT.withRecordSeparator('\n').withHeader(this.header).print(writer); for (var row : this.rows) printer.printRecord(row); printer.flush(); } @Override public String getContentType() { return "text/csv; encoding=utf-8"; } } }
Not supported in the downstream process, yet. But I will add them here so that we know they're there at least.
private HttpResponse getAllInvoices() { var invoices = billingController.getInvoices(); var headers = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk" }; var rows = invoices.stream() .map(invoice -> { return new Object[] { invoice.id().value(), invoice.tenant().value(), invoice.getStartTime().format(DateTimeFormatter.ISO_LOCAL_DATE), invoice.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE), invoice.sumCpuHours(), invoice.sumMemoryHours(), invoice.sumDiskHours(), invoice.sumCpuCost(), invoice.sumMemoryCost(), invoice.sumDiskCost() }; }) .collect(Collectors.toList()); return new CsvResponse(headers, rows); }
};
private HttpResponse getAllInvoices() { var invoices = billingController.getInvoices(); var headers = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk", "Additional" }; var rows = invoices.stream() .map(invoice -> { return new Object[] { invoice.id().value(), invoice.tenant().value(), invoice.getStartTime().format(DateTimeFormatter.ISO_LOCAL_DATE), invoice.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE), invoice.sumCpuHours(), invoice.sumMemoryHours(), invoice.sumDiskHours(), invoice.sumCpuCost(), invoice.sumMemoryCost(), invoice.sumDiskCost(), invoice.sumAdditionalCost() }; }) .collect(Collectors.toList()); return new CsvResponse(headers, rows); }
class BillingApiHandler extends LoggingRequestHandler { private static final String OPTIONAL_PREFIX = "/api"; private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd"); private final BillingController billingController; private final ApplicationController applicationController; private final TenantController tenantController; public BillingApiHandler(Executor executor, AccessLog accessLog, Controller controller) { super(executor, accessLog); this.billingController = controller.serviceRegistry().billingController(); this.applicationController = controller.applications(); this.tenantController = controller.tenants(); } @Override public HttpResponse handle(HttpRequest request) { try { Path path = new Path(request.getUri(), OPTIONAL_PREFIX); String userId = userIdOrThrow(request); switch (request.getMethod()) { case GET: return handleGET(request, path, userId); case PATCH: return handlePATCH(request, path, userId); case DELETE: return handleDELETE(path, userId); case POST: return handlePOST(path, request, userId); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (NotFoundException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (Exception e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError("Internal problem while handling billing API request"); } } private HttpResponse handleGET(HttpRequest request, Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/token")) return getToken(path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/instrument")) return getInstruments(path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/billing")) return getBilling(path.get("tenant"), 
request.getProperty("until")); if (path.matches("/billing/v1/tenant/{tenant}/plan")) return getPlan(path.get("tenant")); if (path.matches("/billing/v1/billing")) return getBillingAllTenants(request.getProperty("until")); if (path.matches("/billing/v1/invoice/export")) return getAllInvoices(); if (path.matches("/billing/v1/invoice/tenant/{tenant}/line-item")) return getLineItems(path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(HttpRequest request, Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/instrument")) return patchActiveInstrument(request, path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/plan")) return patchPlan(request, path.get("tenant")); if (path.matches("/billing/v1/tenant/{tenant}/collection")) return patchCollectionMethod(request, path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/instrument/{instrument}")) return deleteInstrument(path.get("tenant"), userId, path.get("instrument")); if (path.matches("/billing/v1/invoice/line-item/{line-item-id}")) return deleteLineItem(path.get("line-item-id")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request, String userId) { if (path.matches("/billing/v1/invoice")) return createInvoice(request, userId); if (path.matches("/billing/v1/invoice/{invoice-id}/status")) return setInvoiceStatus(request, path.get("invoice-id")); if (path.matches("/billing/v1/invoice/tenant/{tenant}/line-item")) return addLineItem(request, path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse getPlan(String tenant) { var plan = billingController.getPlan(TenantName.from(tenant)); var slime = new Slime(); var root = slime.setObject(); root.setString("tenant", tenant); 
root.setString("plan", plan.value()); return new SlimeJsonResponse(slime); } private HttpResponse patchPlan(HttpRequest request, String tenant) { var tenantName = TenantName.from(tenant); var slime = inspectorOrThrow(request); var planId = PlanId.from(slime.field("plan").asString()); var hasDeployments = hasDeployments(tenantName); var result = billingController.setPlan(tenantName, planId, hasDeployments); if (result.isSuccess()) return new StringResponse("Plan: " + planId.value()); return ErrorResponse.forbidden(result.getErrorMessage().orElse("Invalid plan change")); } private HttpResponse patchCollectionMethod(HttpRequest request, String tenant) { var tenantName = TenantName.from(tenant); var slime = inspectorOrThrow(request); var newMethod = slime.field("collection").valid() ? slime.field("collection").asString().toUpperCase() : slime.field("collectionMethod").asString().toUpperCase(); if (newMethod.isEmpty()) return ErrorResponse.badRequest("No collection method specified"); try { var result = billingController.setCollectionMethod(tenantName, CollectionMethod.valueOf(newMethod)); if (result.isSuccess()) return new StringResponse("Collection method updated to " + newMethod); return ErrorResponse.forbidden(result.getErrorMessage().orElse("Invalid collection method change")); } catch (IllegalArgumentException iea){ return ErrorResponse.badRequest("Invalid collection method: " + newMethod); } } private HttpResponse getBillingAllTenants(String until) { try { var untilDate = untilParameter(until); var uncommittedInvoices = billingController.createUncommittedInvoices(untilDate); var slime = new Slime(); var root = slime.setObject(); root.setString("until", untilDate.format(DateTimeFormatter.ISO_DATE)); var tenants = root.setArray("tenants"); tenantController.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> { var invoice = uncommittedInvoices.get(tenant.name()); var tc = tenants.addObject(); tc.setString("tenant", tenant.name().value()); 
getPlanForTenant(tc, tenant.name()); getCollectionForTenant(tc, tenant.name()); renderCurrentUsage(tc.setObject("current"), invoice); renderAdditionalItems(tc.setObject("additional").setArray("items"), billingController.getUnusedLineItems(tenant.name())); billingController.getDefaultInstrument(tenant.name()).ifPresent(card -> renderInstrument(tc.setObject("payment"), card) ); }); return new SlimeJsonResponse(slime); } catch (DateTimeParseException e) { return ErrorResponse.badRequest("Could not parse date: " + until); } } private void getCollectionForTenant(Cursor tc, TenantName tenant) { var collection = billingController.getCollectionMethod(tenant); tc.setString("collection", collection.name()); } private HttpResponse addLineItem(HttpRequest request, String tenant) { Inspector inspector = inspectorOrThrow(request); billingController.addLineItem( TenantName.from(tenant), getInspectorFieldOrThrow(inspector, "description"), new BigDecimal(getInspectorFieldOrThrow(inspector, "amount")), userIdOrThrow(request)); return new MessageResponse("Added line item for tenant " + tenant); } private HttpResponse setInvoiceStatus(HttpRequest request, String invoiceId) { Inspector inspector = inspectorOrThrow(request); String status = getInspectorFieldOrThrow(inspector, "status"); billingController.updateInvoiceStatus(Invoice.Id.of(invoiceId), userIdOrThrow(request), status); return new MessageResponse("Updated status of invoice " + invoiceId); } private HttpResponse createInvoice(HttpRequest request, String userId) { Inspector inspector = inspectorOrThrow(request); TenantName tenantName = TenantName.from(getInspectorFieldOrThrow(inspector, "tenant")); LocalDate startDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "startTime")); LocalDate endDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "endTime")); ZonedDateTime startTime = startDate.atStartOfDay(ZoneId.of("UTC")); ZonedDateTime endTime = endDate.atStartOfDay(ZoneId.of("UTC")); var invoiceId = 
billingController.createInvoiceForPeriod(tenantName, startTime, endTime, userId); return new MessageResponse("Created invoice with ID " + invoiceId.value()); } private HttpResponse getInstruments(String tenant, String userId) { var instrumentListResponse = billingController.listInstruments(TenantName.from(tenant), userId); return new JacksonJsonResponse<>(200, instrumentListResponse); } private HttpResponse getToken(String tenant, String userId) { return new StringResponse(billingController.createClientToken(tenant, userId)); } private HttpResponse getBilling(String tenant, String until) { try { var untilDate = untilParameter(until); var tenantId = TenantName.from(tenant); var slimeResponse = new Slime(); var root = slimeResponse.setObject(); root.setString("until", untilDate.format(DateTimeFormatter.ISO_DATE)); getPlanForTenant(root, tenantId); renderCurrentUsage(root.setObject("current"), getCurrentUsageForTenant(tenantId, untilDate)); renderAdditionalItems(root.setObject("additional").setArray("items"), billingController.getUnusedLineItems(tenantId)); renderInvoices(root.setArray("bills"), getInvoicesForTenant(tenantId)); billingController.getDefaultInstrument(tenantId).ifPresent( card -> renderInstrument(root.setObject("payment"), card) ); root.setString("collection", billingController.getCollectionMethod(tenantId).name()); return new SlimeJsonResponse(slimeResponse); } catch (DateTimeParseException e) { return ErrorResponse.badRequest("Could not parse date: " + until); } } private HttpResponse getLineItems(String tenant) { var slimeResponse = new Slime(); var root = slimeResponse.setObject(); var lineItems = root.setArray("lineItems"); billingController.getUnusedLineItems(TenantName.from(tenant)) .forEach(lineItem -> { var itemCursor = lineItems.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); return new SlimeJsonResponse(slimeResponse); } private void getPlanForTenant(Cursor cursor, TenantName tenant) { PlanId plan = 
billingController.getPlan(tenant); cursor.setString("plan", plan.value()); cursor.setString("planName", billingController.getPlanDisplayName(plan)); } private void renderInstrument(Cursor cursor, PaymentInstrument instrument) { cursor.setString("pi-id", instrument.getId()); cursor.setString("type", instrument.getType()); cursor.setString("brand", instrument.getBrand()); cursor.setString("endingWith", instrument.getEndingWith()); cursor.setString("expiryDate", instrument.getExpiryDate()); cursor.setString("displayText", instrument.getDisplayText()); cursor.setString("nameOnCard", instrument.getNameOnCard()); cursor.setString("addressLine1", instrument.getAddressLine1()); cursor.setString("addressLine2", instrument.getAddressLine2()); cursor.setString("zip", instrument.getZip()); cursor.setString("city", instrument.getCity()); cursor.setString("state", instrument.getState()); cursor.setString("country", instrument.getCountry()); } private void renderCurrentUsage(Cursor cursor, Invoice currentUsage) { if (currentUsage == null) return; cursor.setString("amount", currentUsage.sum().toPlainString()); cursor.setString("status", "accrued"); cursor.setString("from", currentUsage.getStartTime().format(DATE_TIME_FORMATTER)); var itemsCursor = cursor.setArray("items"); currentUsage.lineItems().forEach(lineItem -> { var itemCursor = itemsCursor.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); } private void renderAdditionalItems(Cursor cursor, List<Invoice.LineItem> items) { items.forEach(item -> { renderLineItemToCursor(cursor.addObject(), item); }); } private Invoice getCurrentUsageForTenant(TenantName tenant, LocalDate until) { return billingController.createUncommittedInvoice(tenant, until); } private List<Invoice> getInvoicesForTenant(TenantName tenant) { return billingController.getInvoicesForTenant(tenant); } private void renderInvoices(Cursor cursor, List<Invoice> invoices) { invoices.forEach(invoice -> { var invoiceCursor = cursor.addObject(); 
renderInvoiceToCursor(invoiceCursor, invoice); }); } private void renderInvoiceToCursor(Cursor invoiceCursor, Invoice invoice) { invoiceCursor.setString("id", invoice.id().value()); invoiceCursor.setString("from", invoice.getStartTime().format(DATE_TIME_FORMATTER)); invoiceCursor.setString("to", invoice.getEndTime().format(DATE_TIME_FORMATTER)); invoiceCursor.setString("amount", invoice.sum().toString()); invoiceCursor.setString("status", invoice.status()); var statusCursor = invoiceCursor.setArray("statusHistory"); renderStatusHistory(statusCursor, invoice.statusHistory()); var lineItemsCursor = invoiceCursor.setArray("items"); invoice.lineItems().forEach(lineItem -> { var itemCursor = lineItemsCursor.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); } private void renderStatusHistory(Cursor cursor, Invoice.StatusHistory statusHistory) { statusHistory.getHistory() .entrySet() .stream() .forEach(entry -> { var c = cursor.addObject(); c.setString("at", entry.getKey().format(DATE_TIME_FORMATTER)); c.setString("status", entry.getValue()); }); } private void renderLineItemToCursor(Cursor cursor, Invoice.LineItem lineItem) { cursor.setString("id", lineItem.id()); cursor.setString("description", lineItem.description()); cursor.setString("amount", lineItem.amount().toString()); cursor.setString("plan", lineItem.plan()); cursor.setString("planName", billingController.getPlanDisplayName(PlanId.from(lineItem.plan()))); lineItem.applicationId().ifPresent(appId -> { cursor.setString("application", appId.application().value()); cursor.setString("instance", appId.instance().value()); }); lineItem.zoneId().ifPresent(zoneId -> cursor.setString("zone", zoneId.value()) ); lineItem.getCpuHours().ifPresent(cpuHours -> cursor.setString("cpuHours", cpuHours.toString()) ); lineItem.getMemoryHours().ifPresent(memoryHours -> cursor.setString("memoryHours", memoryHours.toString()) ); lineItem.getDiskHours().ifPresent(diskHours -> cursor.setString("diskHours", 
diskHours.toString()) ); lineItem.getCpuCost().ifPresent(cpuCost -> cursor.setString("cpuCost", cpuCost.toString()) ); lineItem.getMemoryCost().ifPresent(memoryCost -> cursor.setString("memoryCost", memoryCost.toString()) ); lineItem.getDiskCost().ifPresent(diskCost -> cursor.setString("diskCost", diskCost.toString()) ); } private HttpResponse deleteInstrument(String tenant, String userId, String instrument) { if (billingController.deleteInstrument(TenantName.from(tenant), userId, instrument)) { return new StringResponse("OK"); } else { return ErrorResponse.forbidden("Cannot delete payment instrument you don't own"); } } private HttpResponse deleteLineItem(String lineItemId) { billingController.deleteLineItem(lineItemId); return new MessageResponse("Succesfully deleted line item " + lineItemId); } private HttpResponse patchActiveInstrument(HttpRequest request, String tenant, String userId) { var inspector = inspectorOrThrow(request); String instrumentId = getInspectorFieldOrThrow(inspector, "active"); InstrumentOwner paymentInstrument = new InstrumentOwner(TenantName.from(tenant), userId, instrumentId, true); boolean success = billingController.setActivePaymentInstrument(paymentInstrument); return success ? 
new StringResponse("OK") : ErrorResponse.internalServerError("Failed to patch active instrument"); } private Inspector inspectorOrThrow(HttpRequest request) { try { return SlimeUtils.jsonToSlime(request.getData().readAllBytes()).get(); } catch (IOException e) { throw new BadRequestException("Failed to parse request body"); } } private static String userIdOrThrow(HttpRequest request) { return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal()) .map(Principal::getName) .orElseThrow(() -> new ForbiddenException("Must be authenticated to use this API")); } private static String getInspectorFieldOrThrow(Inspector inspector, String field) { if (!inspector.field(field).valid()) throw new BadRequestException("Field " + field + " cannot be null"); return inspector.field(field).asString(); } private LocalDate untilParameter(String until) { if (until == null || until.isEmpty() || until.isBlank()) return LocalDate.now().plusDays(1); return LocalDate.parse(until); } private boolean hasDeployments(TenantName tenantName) { return applicationController.asList(tenantName) .stream() .flatMap(app -> app.instances().values() .stream() .flatMap(instance -> instance.deployments().values().stream()) ) .count() > 0; } private static class CsvResponse extends HttpResponse { private final String[] header; private final List<Object[]> rows; CsvResponse(String[] header, List<Object[]> rows) { super(200); this.header = header; this.rows = rows; } @Override public void render(OutputStream outputStream) throws IOException { var writer = new OutputStreamWriter(outputStream); var printer = CSVFormat.DEFAULT.withRecordSeparator('\n').withHeader(this.header).print(writer); for (var row : this.rows) printer.printRecord(row); printer.flush(); } @Override public String getContentType() { return "text/csv; encoding=utf-8"; } } }
class BillingApiHandler extends LoggingRequestHandler { private static final String OPTIONAL_PREFIX = "/api"; private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd"); private final BillingController billingController; private final ApplicationController applicationController; private final TenantController tenantController; public BillingApiHandler(Executor executor, AccessLog accessLog, Controller controller) { super(executor, accessLog); this.billingController = controller.serviceRegistry().billingController(); this.applicationController = controller.applications(); this.tenantController = controller.tenants(); } @Override public HttpResponse handle(HttpRequest request) { try { Path path = new Path(request.getUri(), OPTIONAL_PREFIX); String userId = userIdOrThrow(request); switch (request.getMethod()) { case GET: return handleGET(request, path, userId); case PATCH: return handlePATCH(request, path, userId); case DELETE: return handleDELETE(path, userId); case POST: return handlePOST(path, request, userId); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (NotFoundException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (Exception e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError("Internal problem while handling billing API request"); } } private HttpResponse handleGET(HttpRequest request, Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/token")) return getToken(path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/instrument")) return getInstruments(path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/billing")) return getBilling(path.get("tenant"), 
request.getProperty("until")); if (path.matches("/billing/v1/tenant/{tenant}/plan")) return getPlan(path.get("tenant")); if (path.matches("/billing/v1/billing")) return getBillingAllTenants(request.getProperty("until")); if (path.matches("/billing/v1/invoice/export")) return getAllInvoices(); if (path.matches("/billing/v1/invoice/tenant/{tenant}/line-item")) return getLineItems(path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(HttpRequest request, Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/instrument")) return patchActiveInstrument(request, path.get("tenant"), userId); if (path.matches("/billing/v1/tenant/{tenant}/plan")) return patchPlan(request, path.get("tenant")); if (path.matches("/billing/v1/tenant/{tenant}/collection")) return patchCollectionMethod(request, path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, String userId) { if (path.matches("/billing/v1/tenant/{tenant}/instrument/{instrument}")) return deleteInstrument(path.get("tenant"), userId, path.get("instrument")); if (path.matches("/billing/v1/invoice/line-item/{line-item-id}")) return deleteLineItem(path.get("line-item-id")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request, String userId) { if (path.matches("/billing/v1/invoice")) return createInvoice(request, userId); if (path.matches("/billing/v1/invoice/{invoice-id}/status")) return setInvoiceStatus(request, path.get("invoice-id")); if (path.matches("/billing/v1/invoice/tenant/{tenant}/line-item")) return addLineItem(request, path.get("tenant")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse getPlan(String tenant) { var plan = billingController.getPlan(TenantName.from(tenant)); var slime = new Slime(); var root = slime.setObject(); root.setString("tenant", tenant); 
root.setString("plan", plan.value()); return new SlimeJsonResponse(slime); } private HttpResponse patchPlan(HttpRequest request, String tenant) { var tenantName = TenantName.from(tenant); var slime = inspectorOrThrow(request); var planId = PlanId.from(slime.field("plan").asString()); var hasDeployments = hasDeployments(tenantName); var result = billingController.setPlan(tenantName, planId, hasDeployments); if (result.isSuccess()) return new StringResponse("Plan: " + planId.value()); return ErrorResponse.forbidden(result.getErrorMessage().orElse("Invalid plan change")); } private HttpResponse patchCollectionMethod(HttpRequest request, String tenant) { var tenantName = TenantName.from(tenant); var slime = inspectorOrThrow(request); var newMethod = slime.field("collection").valid() ? slime.field("collection").asString().toUpperCase() : slime.field("collectionMethod").asString().toUpperCase(); if (newMethod.isEmpty()) return ErrorResponse.badRequest("No collection method specified"); try { var result = billingController.setCollectionMethod(tenantName, CollectionMethod.valueOf(newMethod)); if (result.isSuccess()) return new StringResponse("Collection method updated to " + newMethod); return ErrorResponse.forbidden(result.getErrorMessage().orElse("Invalid collection method change")); } catch (IllegalArgumentException iea){ return ErrorResponse.badRequest("Invalid collection method: " + newMethod); } } private HttpResponse getBillingAllTenants(String until) { try { var untilDate = untilParameter(until); var uncommittedInvoices = billingController.createUncommittedInvoices(untilDate); var slime = new Slime(); var root = slime.setObject(); root.setString("until", untilDate.format(DateTimeFormatter.ISO_DATE)); var tenants = root.setArray("tenants"); tenantController.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> { var invoice = uncommittedInvoices.get(tenant.name()); var tc = tenants.addObject(); tc.setString("tenant", tenant.name().value()); 
getPlanForTenant(tc, tenant.name()); getCollectionForTenant(tc, tenant.name()); renderCurrentUsage(tc.setObject("current"), invoice); renderAdditionalItems(tc.setObject("additional").setArray("items"), billingController.getUnusedLineItems(tenant.name())); billingController.getDefaultInstrument(tenant.name()).ifPresent(card -> renderInstrument(tc.setObject("payment"), card) ); }); return new SlimeJsonResponse(slime); } catch (DateTimeParseException e) { return ErrorResponse.badRequest("Could not parse date: " + until); } } private void getCollectionForTenant(Cursor tc, TenantName tenant) { var collection = billingController.getCollectionMethod(tenant); tc.setString("collection", collection.name()); } private HttpResponse addLineItem(HttpRequest request, String tenant) { Inspector inspector = inspectorOrThrow(request); billingController.addLineItem( TenantName.from(tenant), getInspectorFieldOrThrow(inspector, "description"), new BigDecimal(getInspectorFieldOrThrow(inspector, "amount")), userIdOrThrow(request)); return new MessageResponse("Added line item for tenant " + tenant); } private HttpResponse setInvoiceStatus(HttpRequest request, String invoiceId) { Inspector inspector = inspectorOrThrow(request); String status = getInspectorFieldOrThrow(inspector, "status"); billingController.updateInvoiceStatus(Invoice.Id.of(invoiceId), userIdOrThrow(request), status); return new MessageResponse("Updated status of invoice " + invoiceId); } private HttpResponse createInvoice(HttpRequest request, String userId) { Inspector inspector = inspectorOrThrow(request); TenantName tenantName = TenantName.from(getInspectorFieldOrThrow(inspector, "tenant")); LocalDate startDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "startTime")); LocalDate endDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "endTime")); ZonedDateTime startTime = startDate.atStartOfDay(ZoneId.of("UTC")); ZonedDateTime endTime = endDate.atStartOfDay(ZoneId.of("UTC")); var invoiceId = 
billingController.createInvoiceForPeriod(tenantName, startTime, endTime, userId); return new MessageResponse("Created invoice with ID " + invoiceId.value()); } private HttpResponse getInstruments(String tenant, String userId) { var instrumentListResponse = billingController.listInstruments(TenantName.from(tenant), userId); return new JacksonJsonResponse<>(200, instrumentListResponse); } private HttpResponse getToken(String tenant, String userId) { return new StringResponse(billingController.createClientToken(tenant, userId)); } private HttpResponse getBilling(String tenant, String until) { try { var untilDate = untilParameter(until); var tenantId = TenantName.from(tenant); var slimeResponse = new Slime(); var root = slimeResponse.setObject(); root.setString("until", untilDate.format(DateTimeFormatter.ISO_DATE)); getPlanForTenant(root, tenantId); renderCurrentUsage(root.setObject("current"), getCurrentUsageForTenant(tenantId, untilDate)); renderAdditionalItems(root.setObject("additional").setArray("items"), billingController.getUnusedLineItems(tenantId)); renderInvoices(root.setArray("bills"), getInvoicesForTenant(tenantId)); billingController.getDefaultInstrument(tenantId).ifPresent( card -> renderInstrument(root.setObject("payment"), card) ); root.setString("collection", billingController.getCollectionMethod(tenantId).name()); return new SlimeJsonResponse(slimeResponse); } catch (DateTimeParseException e) { return ErrorResponse.badRequest("Could not parse date: " + until); } } private HttpResponse getLineItems(String tenant) { var slimeResponse = new Slime(); var root = slimeResponse.setObject(); var lineItems = root.setArray("lineItems"); billingController.getUnusedLineItems(TenantName.from(tenant)) .forEach(lineItem -> { var itemCursor = lineItems.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); return new SlimeJsonResponse(slimeResponse); } private void getPlanForTenant(Cursor cursor, TenantName tenant) { PlanId plan = 
billingController.getPlan(tenant); cursor.setString("plan", plan.value()); cursor.setString("planName", billingController.getPlanDisplayName(plan)); } private void renderInstrument(Cursor cursor, PaymentInstrument instrument) { cursor.setString("pi-id", instrument.getId()); cursor.setString("type", instrument.getType()); cursor.setString("brand", instrument.getBrand()); cursor.setString("endingWith", instrument.getEndingWith()); cursor.setString("expiryDate", instrument.getExpiryDate()); cursor.setString("displayText", instrument.getDisplayText()); cursor.setString("nameOnCard", instrument.getNameOnCard()); cursor.setString("addressLine1", instrument.getAddressLine1()); cursor.setString("addressLine2", instrument.getAddressLine2()); cursor.setString("zip", instrument.getZip()); cursor.setString("city", instrument.getCity()); cursor.setString("state", instrument.getState()); cursor.setString("country", instrument.getCountry()); } private void renderCurrentUsage(Cursor cursor, Invoice currentUsage) { if (currentUsage == null) return; cursor.setString("amount", currentUsage.sum().toPlainString()); cursor.setString("status", "accrued"); cursor.setString("from", currentUsage.getStartTime().format(DATE_TIME_FORMATTER)); var itemsCursor = cursor.setArray("items"); currentUsage.lineItems().forEach(lineItem -> { var itemCursor = itemsCursor.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); } private void renderAdditionalItems(Cursor cursor, List<Invoice.LineItem> items) { items.forEach(item -> { renderLineItemToCursor(cursor.addObject(), item); }); } private Invoice getCurrentUsageForTenant(TenantName tenant, LocalDate until) { return billingController.createUncommittedInvoice(tenant, until); } private List<Invoice> getInvoicesForTenant(TenantName tenant) { return billingController.getInvoicesForTenant(tenant); } private void renderInvoices(Cursor cursor, List<Invoice> invoices) { invoices.forEach(invoice -> { var invoiceCursor = cursor.addObject(); 
renderInvoiceToCursor(invoiceCursor, invoice); }); } private void renderInvoiceToCursor(Cursor invoiceCursor, Invoice invoice) { invoiceCursor.setString("id", invoice.id().value()); invoiceCursor.setString("from", invoice.getStartTime().format(DATE_TIME_FORMATTER)); invoiceCursor.setString("to", invoice.getEndTime().format(DATE_TIME_FORMATTER)); invoiceCursor.setString("amount", invoice.sum().toString()); invoiceCursor.setString("status", invoice.status()); var statusCursor = invoiceCursor.setArray("statusHistory"); renderStatusHistory(statusCursor, invoice.statusHistory()); var lineItemsCursor = invoiceCursor.setArray("items"); invoice.lineItems().forEach(lineItem -> { var itemCursor = lineItemsCursor.addObject(); renderLineItemToCursor(itemCursor, lineItem); }); } private void renderStatusHistory(Cursor cursor, Invoice.StatusHistory statusHistory) { statusHistory.getHistory() .entrySet() .stream() .forEach(entry -> { var c = cursor.addObject(); c.setString("at", entry.getKey().format(DATE_TIME_FORMATTER)); c.setString("status", entry.getValue()); }); } private void renderLineItemToCursor(Cursor cursor, Invoice.LineItem lineItem) { cursor.setString("id", lineItem.id()); cursor.setString("description", lineItem.description()); cursor.setString("amount", lineItem.amount().toString()); cursor.setString("plan", lineItem.plan()); cursor.setString("planName", billingController.getPlanDisplayName(PlanId.from(lineItem.plan()))); lineItem.applicationId().ifPresent(appId -> { cursor.setString("application", appId.application().value()); cursor.setString("instance", appId.instance().value()); }); lineItem.zoneId().ifPresent(zoneId -> cursor.setString("zone", zoneId.value()) ); lineItem.getCpuHours().ifPresent(cpuHours -> cursor.setString("cpuHours", cpuHours.toString()) ); lineItem.getMemoryHours().ifPresent(memoryHours -> cursor.setString("memoryHours", memoryHours.toString()) ); lineItem.getDiskHours().ifPresent(diskHours -> cursor.setString("diskHours", 
diskHours.toString()) ); lineItem.getCpuCost().ifPresent(cpuCost -> cursor.setString("cpuCost", cpuCost.toString()) ); lineItem.getMemoryCost().ifPresent(memoryCost -> cursor.setString("memoryCost", memoryCost.toString()) ); lineItem.getDiskCost().ifPresent(diskCost -> cursor.setString("diskCost", diskCost.toString()) ); } private HttpResponse deleteInstrument(String tenant, String userId, String instrument) { if (billingController.deleteInstrument(TenantName.from(tenant), userId, instrument)) { return new StringResponse("OK"); } else { return ErrorResponse.forbidden("Cannot delete payment instrument you don't own"); } } private HttpResponse deleteLineItem(String lineItemId) { billingController.deleteLineItem(lineItemId); return new MessageResponse("Succesfully deleted line item " + lineItemId); } private HttpResponse patchActiveInstrument(HttpRequest request, String tenant, String userId) { var inspector = inspectorOrThrow(request); String instrumentId = getInspectorFieldOrThrow(inspector, "active"); InstrumentOwner paymentInstrument = new InstrumentOwner(TenantName.from(tenant), userId, instrumentId, true); boolean success = billingController.setActivePaymentInstrument(paymentInstrument); return success ? 
new StringResponse("OK") : ErrorResponse.internalServerError("Failed to patch active instrument"); } private Inspector inspectorOrThrow(HttpRequest request) { try { return SlimeUtils.jsonToSlime(request.getData().readAllBytes()).get(); } catch (IOException e) { throw new BadRequestException("Failed to parse request body"); } } private static String userIdOrThrow(HttpRequest request) { return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal()) .map(Principal::getName) .orElseThrow(() -> new ForbiddenException("Must be authenticated to use this API")); } private static String getInspectorFieldOrThrow(Inspector inspector, String field) { if (!inspector.field(field).valid()) throw new BadRequestException("Field " + field + " cannot be null"); return inspector.field(field).asString(); } private LocalDate untilParameter(String until) { if (until == null || until.isEmpty() || until.isBlank()) return LocalDate.now().plusDays(1); return LocalDate.parse(until); } private boolean hasDeployments(TenantName tenantName) { return applicationController.asList(tenantName) .stream() .flatMap(app -> app.instances().values() .stream() .flatMap(instance -> instance.deployments().values().stream()) ) .count() > 0; } private static class CsvResponse extends HttpResponse { private final String[] header; private final List<Object[]> rows; CsvResponse(String[] header, List<Object[]> rows) { super(200); this.header = header; this.rows = rows; } @Override public void render(OutputStream outputStream) throws IOException { var writer = new OutputStreamWriter(outputStream); var printer = CSVFormat.DEFAULT.withRecordSeparator('\n').withHeader(this.header).print(writer); for (var row : this.rows) printer.printRecord(row); printer.flush(); } @Override public String getContentType() { return "text/csv; encoding=utf-8"; } } }
Remove.
// Boots a ZooKeeper server process from the given configuration and hands back its runner.
private ZooKeeperRunner startServer(ZookeeperServerConfig zookeeperServerConfig) {
    ZooKeeperRunner runner = new ZooKeeperRunner(zookeeperServerConfig);
    return runner;
}
/** Starts a new ZooKeeper server driven by {@code config}. */
private ZooKeeperRunner startServer(ZookeeperServerConfig config) {
    return new ZooKeeperRunner(config);
}
class Reconfigurer extends AbstractComponent { private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName()); private ZooKeeperRunner zooKeeperRunner; @Inject public Reconfigurer() { log.log(Level.FINE, "Created ZooKeeperReconfigurer"); } void startOrReconfigure(ZookeeperServerConfig newConfig) { if (zooKeeperRunner == null) zooKeeperRunner = startServer(newConfig); if (shouldReconfigure(newConfig)) reconfigure(newConfig); } boolean shouldReconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig(); if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false; return !newConfig.equals(existingConfig); } void reconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig(); List<String> originalServers = List.copyOf(servers(existingConfig)); log.log(Level.INFO, "Original servers: " + originalServers); List<String> joiningServers = servers(newConfig); List<String> leavingServers = setDifference(originalServers, joiningServers); List<String> addedServers = setDifference(joiningServers, leavingServers); log.log(Level.INFO, "Will reconfigure zookeeper cluster. 
Joining servers: " + joiningServers + ", leaving servers: " + leavingServers + ", new members" + addedServers); int sessionTimeoutInSeconds = 30; try { ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(existingConfig), sessionTimeoutInSeconds, null); long fromConfig = -1; zooKeeperAdmin.reconfigure(joiningServers, originalServers, addedServers, fromConfig, null); } catch (IOException | KeeperException | InterruptedException e) { throw new RuntimeException(e); } } List<String> currentServers() { if (zooKeeperRunner == null) return List.of(); return servers(zooKeeperRunner.zookeeperServerConfig()); } List<String> setDifference(List<String> a, List<String> b) { Set<String> ret = new HashSet<>(a); ret.removeAll(b); return new ArrayList<>(ret); } private String connectionSpec(ZookeeperServerConfig config) { return String.join(",", servers(config)); } private List<String> servers(ZookeeperServerConfig config) { return config.server().stream() .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort()) .collect(Collectors.toList()); } }
// Starts a ZooKeeper server on first use and dynamically reconfigures the running
// ensemble when a changed config (with dynamic reconfiguration enabled) arrives.
class Reconfigurer extends AbstractComponent {

    private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName());

    // Session timeout for the short-lived admin connection used during reconfiguration.
    private static final int sessionTimeoutInSeconds = 30;

    private ZooKeeperRunner zooKeeperRunner;

    @Inject
    public Reconfigurer() {
        log.log(Level.FINE, "Created ZooKeeperReconfigurer");
    }

    // Starts the server on the first invocation, then reconfigures if the config changed.
    void startOrReconfigure(ZookeeperServerConfig newConfig) {
        if (zooKeeperRunner == null)
            zooKeeperRunner = startServer(newConfig);
        if (shouldReconfigure(newConfig))
            reconfigure(newConfig);
    }

    // True only when dynamic reconfiguration is enabled and the config actually differs.
    boolean shouldReconfigure(ZookeeperServerConfig newConfig) {
        ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig();
        if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false;
        return !newConfig.equals(existingConfig);
    }

    // Computes the joining/leaving server sets and applies them via the ZooKeeper admin API.
    void reconfigure(ZookeeperServerConfig newConfig) {
        ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig();
        List<String> originalServers = List.copyOf(servers(existingConfig));
        log.log(Level.INFO, "Original servers: " + originalServers);
        List<String> joiningServers = servers(newConfig);
        List<String> leavingServers = setDifference(originalServers, joiningServers);
        List<String> addedServers = setDifference(joiningServers, leavingServers);
        log.log(Level.INFO, "Will reconfigure zookeeper cluster. Joining servers: " + joiningServers +
                            ", leaving servers: " + leavingServers + ", new members" + addedServers);
        try {
            // NOTE(review): this admin session is never closed — looks like a session leak; verify.
            ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(existingConfig), sessionTimeoutInSeconds, null);
            long fromConfig = -1; // -1: apply regardless of the ensemble's current config version
            zooKeeperAdmin.reconfigure(joiningServers, originalServers, addedServers, fromConfig, null);
        } catch (IOException | KeeperException | InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Returns items in set a that are not in set b
     */
    List<String> setDifference(List<String> a, List<String> b) {
        Set<String> ret = new HashSet<>(a);
        ret.removeAll(b);
        return new ArrayList<>(ret);
    }

    // Comma-separated connection string for all servers in the given config.
    private String connectionSpec(ZookeeperServerConfig config) {
        return String.join(",", servers(config));
    }

    // One "host:quorumPort:electionPort" entry per configured server.
    private List<String> servers(ZookeeperServerConfig config) {
        return config.server().stream()
                     .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort())
                     .collect(Collectors.toList());
    }

}
Yes, that's right, thanks!
/**
 * Writes this server's id into the ZooKeeper server config.
 * The former getParent()-null guard was dead code (NOTE(review): removal of the guard
 * was confirmed in review — config is only requested on fully wired producers).
 */
public void getConfig(ZookeeperServerConfig.Builder builder) {
    builder.myid(index());
}
if (parent == null) return;
/** Reports this server's id (its index) into the ZooKeeper server config. */
public void getConfig(ZookeeperServerConfig.Builder builder) {
    int serverId = index();
    builder.myid(serverId);
}
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, boolean isHostedVespa) { this(parent, name, false, index, isHostedVespa); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, boolean isHostedVespa) { super(parent, name, retired, index, isHostedVespa); this.isHostedVespa = isHostedVespa; addComponent(getFS4ResourcePool()); } private static Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override public String getJvmOptions() { String jvmArgs = super.getJvmOptions(); return isHostedVespa && hasDocproc() ? ("".equals(jvmArgs) ? 
defaultHostedJVMArgs : defaultHostedJVMArgs + " " + jvmArgs) : jvmArgs; } private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override }
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, boolean isHostedVespa) { this(parent, name, false, index, isHostedVespa); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, boolean isHostedVespa) { super(parent, name, retired, index, isHostedVespa); this.isHostedVespa = isHostedVespa; addComponent(getFS4ResourcePool()); } private static Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override public String getJvmOptions() { String jvmArgs = super.getJvmOptions(); return isHostedVespa && hasDocproc() ? ("".equals(jvmArgs) ? 
defaultHostedJVMArgs : defaultHostedJVMArgs + " " + jvmArgs) : jvmArgs; } private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override }
Consider extracting as constant.
/**
 * Computes the joining/leaving server sets against the currently running config
 * and applies them to the ZooKeeper ensemble via the admin API.
 *
 * @throws RuntimeException wrapping any IO/ZooKeeper/interrupt failure
 */
void reconfigure(ZookeeperServerConfig newConfig) {
    ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig();
    List<String> originalServers = List.copyOf(servers(existingConfig));
    log.log(Level.INFO, "Original servers: " + originalServers);
    List<String> joiningServers = servers(newConfig);
    List<String> leavingServers = setDifference(originalServers, joiningServers);
    List<String> addedServers = setDifference(joiningServers, leavingServers);
    log.log(Level.INFO, "Will reconfigure zookeeper cluster. Joining servers: " + joiningServers +
                        ", leaving servers: " + leavingServers + ", new members: " + addedServers);
    // TODO(review): consider extracting this timeout as a class-level constant.
    int sessionTimeoutInSeconds = 30;
    try {
        ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(existingConfig), sessionTimeoutInSeconds, null);
        try {
            long fromConfig = -1; // -1 means: apply regardless of the current config version
            zooKeeperAdmin.reconfigure(joiningServers, originalServers, addedServers, fromConfig, null);
        } finally {
            zooKeeperAdmin.close(); // fixes a session leak: the admin connection was never closed
        }
    } catch (IOException | KeeperException | InterruptedException e) {
        throw new RuntimeException(e);
    }
}
int sessionTimeoutInSeconds = 30;
/**
 * Computes the joining/leaving server sets against the currently running config
 * and applies them to the ZooKeeper ensemble via the admin API.
 *
 * @throws RuntimeException wrapping any IO/ZooKeeper/interrupt failure
 */
void reconfigure(ZookeeperServerConfig newConfig) {
    ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig();
    List<String> originalServers = List.copyOf(servers(existingConfig));
    log.log(Level.INFO, "Original servers: " + originalServers);
    List<String> joiningServers = servers(newConfig);
    List<String> leavingServers = setDifference(originalServers, joiningServers);
    List<String> addedServers = setDifference(joiningServers, leavingServers);
    log.log(Level.INFO, "Will reconfigure zookeeper cluster. Joining servers: " + joiningServers +
                        ", leaving servers: " + leavingServers + ", new members: " + addedServers);
    try {
        ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(existingConfig), sessionTimeoutInSeconds, null);
        try {
            long fromConfig = -1; // -1 means: apply regardless of the current config version
            zooKeeperAdmin.reconfigure(joiningServers, originalServers, addedServers, fromConfig, null);
        } finally {
            zooKeeperAdmin.close(); // fixes a session leak: the admin connection was never closed
        }
    } catch (IOException | KeeperException | InterruptedException e) {
        throw new RuntimeException(e);
    }
}
class Reconfigurer extends AbstractComponent { private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName()); private ZooKeeperRunner zooKeeperRunner; @Inject public Reconfigurer() { log.log(Level.FINE, "Created ZooKeeperReconfigurer"); } void startOrReconfigure(ZookeeperServerConfig newConfig) { if (zooKeeperRunner == null) zooKeeperRunner = startServer(newConfig); if (shouldReconfigure(newConfig)) reconfigure(newConfig); } boolean shouldReconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig(); if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false; return !newConfig.equals(existingConfig); } private ZooKeeperRunner startServer(ZookeeperServerConfig zookeeperServerConfig) { return new ZooKeeperRunner(zookeeperServerConfig); } List<String> currentServers() { if (zooKeeperRunner == null) return List.of(); return servers(zooKeeperRunner.zookeeperServerConfig()); } List<String> setDifference(List<String> a, List<String> b) { Set<String> ret = new HashSet<>(a); ret.removeAll(b); return new ArrayList<>(ret); } private String connectionSpec(ZookeeperServerConfig config) { return String.join(",", servers(config)); } private List<String> servers(ZookeeperServerConfig config) { return config.server().stream() .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort()) .collect(Collectors.toList()); } }
class Reconfigurer extends AbstractComponent { private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName()); private static final int sessionTimeoutInSeconds = 30; private ZooKeeperRunner zooKeeperRunner; @Inject public Reconfigurer() { log.log(Level.FINE, "Created ZooKeeperReconfigurer"); } void startOrReconfigure(ZookeeperServerConfig newConfig) { if (zooKeeperRunner == null) zooKeeperRunner = startServer(newConfig); if (shouldReconfigure(newConfig)) reconfigure(newConfig); } boolean shouldReconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig(); if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false; return !newConfig.equals(existingConfig); } private ZooKeeperRunner startServer(ZookeeperServerConfig zookeeperServerConfig) { return new ZooKeeperRunner(zookeeperServerConfig); } /** * Returns items in set a that are not in set b */ List<String> setDifference(List<String> a, List<String> b) { Set<String> ret = new HashSet<>(a); ret.removeAll(b); return new ArrayList<>(ret); } private String connectionSpec(ZookeeperServerConfig config) { return String.join(",", servers(config)); } private List<String> servers(ZookeeperServerConfig config) { return config.server().stream() .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort()) .collect(Collectors.toList()); } }
Consider testing that an equal config does not cause reconfiguration.
/** Verifies when reconfiguration is triggered: changed config + reconfiguration enabled. */
public void testStartupAndReconfigure() {
    Reconfigurer reconfigurer = new Reconfigurer();
    reconfigurer.startOrReconfigure(createConfig(1));
    // A changed config alone is not enough; dynamic reconfiguration must be enabled.
    assertFalse(reconfigurer.shouldReconfigure(createConfig(2)));
    assertTrue(reconfigurer.shouldReconfigure(createConfigAllowReconfiguring(2)));
    // A config equal to the running one must not trigger reconfiguration, even when allowed.
    Reconfigurer reconfigurer2 = new Reconfigurer();
    reconfigurer2.startOrReconfigure(createConfigAllowReconfiguring(1));
    assertFalse(reconfigurer2.shouldReconfigure(createConfigAllowReconfiguring(1)));
}
assertTrue(reconfigurer.shouldReconfigure(createConfigAllowReconfiguring(2)));
/** Verifies when reconfiguration is triggered: requires a changed config AND reconfiguration enabled. */
public void testStartupAndReconfigure() {
    Reconfigurer changed = new Reconfigurer();
    changed.startOrReconfigure(createConfig(1));
    // Changed config, but dynamic reconfiguration disabled: no reconfiguration.
    assertFalse(changed.shouldReconfigure(createConfig(2)));
    // Changed config with reconfiguration enabled: reconfiguration happens.
    assertTrue(changed.shouldReconfigure(createConfigAllowReconfiguring(2)));
    // An identical config never triggers reconfiguration, even when enabled.
    Reconfigurer unchanged = new Reconfigurer();
    unchanged.startOrReconfigure(createConfigAllowReconfiguring(1));
    assertFalse(unchanged.shouldReconfigure(createConfigAllowReconfiguring(1)));
}
class ReconfigurerTest { private File cfgFile; private File idFile; @Rule public TemporaryFolder folder = new TemporaryFolder(); @Before public void setup() throws IOException { cfgFile = folder.newFile(); idFile = folder.newFile("myid"); } @Test private ZookeeperServerConfig createConfigAllowReconfiguring(int numberOfServers) { return createConfig(numberOfServers, true); } private ZookeeperServerConfig createConfig(int numberOfServers) { return createConfig(numberOfServers, false); } private ZookeeperServerConfig createConfig(int numberOfServers, boolean dynamicReconfiguration) { ZookeeperServerConfig.Builder builder = new ZookeeperServerConfig.Builder(); builder.zooKeeperConfigFile(cfgFile.getAbsolutePath()); builder.myidFile(idFile.getAbsolutePath()); IntStream.range(0, numberOfServers).forEach(i -> { builder.server(newServer(i, "localhost", i, i + 1)); }); builder.myid(0); builder.dynamicReconfiguration(dynamicReconfiguration); return builder.build(); } private ZookeeperServerConfig.Server.Builder newServer(int id, String hostName, int electionPort, int quorumPort) { ZookeeperServerConfig.Server.Builder builder = new ZookeeperServerConfig.Server.Builder(); builder.id(id); builder.hostname(hostName); builder.electionPort(electionPort); builder.quorumPort(quorumPort); return builder; } }
class ReconfigurerTest { private File cfgFile; private File idFile; @Rule public TemporaryFolder folder = new TemporaryFolder(); @Before public void setup() throws IOException { cfgFile = folder.newFile(); idFile = folder.newFile("myid"); } @Test private ZookeeperServerConfig createConfigAllowReconfiguring(int numberOfServers) { return createConfig(numberOfServers, true); } private ZookeeperServerConfig createConfig(int numberOfServers) { return createConfig(numberOfServers, false); } private ZookeeperServerConfig createConfig(int numberOfServers, boolean dynamicReconfiguration) { ZookeeperServerConfig.Builder builder = new ZookeeperServerConfig.Builder(); builder.zooKeeperConfigFile(cfgFile.getAbsolutePath()); builder.myidFile(idFile.getAbsolutePath()); IntStream.range(0, numberOfServers).forEach(i -> { builder.server(newServer(i, "localhost", i, i + 1)); }); builder.myid(0); builder.dynamicReconfiguration(dynamicReconfiguration); return builder.build(); } private ZookeeperServerConfig.Server.Builder newServer(int id, String hostName, int electionPort, int quorumPort) { ZookeeperServerConfig.Server.Builder builder = new ZookeeperServerConfig.Server.Builder(); builder.id(id); builder.hostname(hostName); builder.electionPort(electionPort); builder.quorumPort(quorumPort); return builder; } }
Hmm ... I like it better the way it is, but thanks :)
// Runs (or resumes) reindexing of one document type, tracking progress in shared state.
// Both switches below use deliberate fall-through (defended in review); do not add breaks.
private void progress(DocumentType type, AtomicReference<Reindexing> reindexing, AtomicReference<Status> status) {
    switch (status.get().state()) {
        default:
            log.log(WARNING, "Unknown reindexing state '" + status.get().state() + "'—not continuing reindexing of " + type);
        case SUCCESSFUL: // fall-through from default: unknown states are treated as done
            return;
        case RUNNING:
            log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type);
            break;
        case FAILED: // retry only after the grace period; otherwise fall through to READY
            if (clock.instant().isBefore(status.get().endedAt().get().plus(failureGrace)))
                return;
        case READY:
            status.updateAndGet(Status::running);
    }
    AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant());
    VisitorControlHandler control = new VisitorControlHandler() {
        @Override
        public void onProgress(ProgressToken token) {
            super.onProgress(token);
            status.updateAndGet(value -> value.progressed(token));
            // Persist progress at most every 10 seconds to limit writes.
            if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) {
                progressLastStored.set(clock.instant());
                database.writeReindexing(reindexing.updateAndGet(value -> value.with(type, status.get())), cluster.name());
                metrics.dump(reindexing.get());
            }
        }
        @Override
        public void onDone(CompletionCode code, String message) {
            super.onDone(code, message);
            phaser.arriveAndAwaitAdvance(); // release the thread waiting below
        }
    };
    VisitorParameters parameters = createParameters(type, status.get().progress().orElse(null));
    parameters.setControlHandler(control);
    Runnable sessionShutdown = visitorSessions.apply(parameters);
    log.log(FINE, () -> "Running reindexing of " + type);
    phaser.arriveAndAwaitAdvance(); // wait until the visit completes or shutdown() terminates the phaser
    sessionShutdown.run();
    // No result means the session never completed — treat as aborted.
    CompletionCode result = control.getResult() != null ? control.getResult().getCode() : CompletionCode.ABORTED;
    switch (result) {
        default:
            log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'");
        case FAILURE: // fall-through from default: unexpected results are recorded as failures
            log.log(WARNING, "Visiting failed: " + control.getResult().getMessage());
            status.updateAndGet(value -> value.failed(clock.instant(), control.getResult().getMessage()));
            break;
        case ABORTED:
            log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later");
            status.updateAndGet(Status::halted);
            break;
        case SUCCESS:
            log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.get().startedAt(), clock.instant()));
            status.updateAndGet(value -> value.successful(clock.instant()));
    }
    // Persist the final status and report metrics regardless of outcome.
    database.writeReindexing(reindexing.updateAndGet(value -> value.with(type, status.get())), cluster.name());
    metrics.dump(reindexing.get());
}
case FAILED:
// Drives reindexing of a single document type until done, halted, or failed.
// NOTE: the switches intentionally fall through between cases (kept as-is per review).
private void progress(DocumentType type, AtomicReference<Reindexing> reindexing, AtomicReference<Status> status) {
    // Decide whether to start/resume based on the stored state for this type.
    switch (status.get().state()) {
        default:
            log.log(WARNING, "Unknown reindexing state '" + status.get().state() + "'—not continuing reindexing of " + type);
        case SUCCESSFUL: // unknown states fall through here and are treated as completed
            return;
        case RUNNING:
            log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type);
            break;
        case FAILED: // within the failure grace period: do nothing; after it, fall through and restart
            if (clock.instant().isBefore(status.get().endedAt().get().plus(failureGrace)))
                return;
        case READY:
            status.updateAndGet(Status::running);
    }
    AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant());
    VisitorControlHandler control = new VisitorControlHandler() {
        @Override
        public void onProgress(ProgressToken token) {
            super.onProgress(token);
            status.updateAndGet(value -> value.progressed(token));
            // Throttle persistence of progress tokens to once per 10 seconds.
            if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) {
                progressLastStored.set(clock.instant());
                database.writeReindexing(reindexing.updateAndGet(value -> value.with(type, status.get())), cluster.name());
                metrics.dump(reindexing.get());
            }
        }
        @Override
        public void onDone(CompletionCode code, String message) {
            super.onDone(code, message);
            phaser.arriveAndAwaitAdvance(); // unblock the waiting reindexer thread
        }
    };
    VisitorParameters parameters = createParameters(type, status.get().progress().orElse(null));
    parameters.setControlHandler(control);
    Runnable sessionShutdown = visitorSessions.apply(parameters);
    log.log(FINE, () -> "Running reindexing of " + type);
    phaser.arriveAndAwaitAdvance(); // block until the visit finishes or shutdown() terminates the phaser
    sessionShutdown.run();
    // A null result indicates the session was torn down before completing: count it as aborted.
    CompletionCode result = control.getResult() != null ? control.getResult().getCode() : CompletionCode.ABORTED;
    switch (result) {
        default:
            log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'");
        case FAILURE: // unexpected result codes fall through and are recorded as failures
            log.log(WARNING, "Visiting failed: " + control.getResult().getMessage());
            status.updateAndGet(value -> value.failed(clock.instant(), control.getResult().getMessage()));
            break;
        case ABORTED:
            log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later");
            status.updateAndGet(Status::halted);
            break;
        case SUCCESS:
            log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.get().startedAt(), clock.instant()));
            status.updateAndGet(value -> value.successful(clock.instant()));
    }
    // Write back the terminal status and emit metrics in every case.
    database.writeReindexing(reindexing.updateAndGet(value -> value.with(type, status.get())), cluster.name());
    metrics.dump(reindexing.get());
}
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); static final Duration failureGrace = Duration.ofMinutes(10); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final Function<VisitorParameters, Runnable> visitorSessions; private final ReindexingMetrics metrics; private final Clock clock; private final Phaser phaser = new Phaser(2); public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Metric metric, Clock clock) { this(cluster, ready, database, parameters -> { try { return access.createVisitorSession(parameters)::destroy; } catch (ParseException e) { throw new IllegalStateException(e); } }, metric, clock ); } Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, Function<VisitorParameters, Runnable> visitorSessions, Metric metric, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.visitorSessions = visitorSessions; this.metrics = new ReindexingMetrics(metric, cluster.name); this.clock = clock; database.initializeIfEmpty(cluster.name, ready, clock.instant()); } /** Lets the reindexer abort any ongoing visit session, wait for it to complete normally, then exit. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. 
*/ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); AtomicReference<Reindexing> reindexing = new AtomicReference<>(database.readReindexing(cluster.name())); database.writeReindexing(reindexing.get(), cluster.name()); metrics.dump(reindexing.get()); try (Lock lock = database.lockReindexing(cluster.name())) { reindexing.set(updateWithReady(ready, reindexing.get(), clock.instant())); database.writeReindexing(reindexing.get(), cluster.name()); metrics.dump(reindexing.get()); for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type, reindexing, new AtomicReference<>(reindexing.get().status().get(type))); if (phaser.isTerminated()) break; } } } static Reindexing updateWithReady(Map<DocumentType, Instant> ready, Reindexing reindexing, Instant now) { for (DocumentType type : ready.keySet()) { if ( ! 
ready.get(type).isAfter(now)) { Status status = reindexing.status().getOrDefault(type, Status.ready(now)); if (status.startedAt().isBefore(ready.get(type))) status = Status.ready(now); reindexing = reindexing.with(type, status); } } return reindexing; } @SuppressWarnings("fallthrough") VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setThrottlePolicy(new DynamicThrottlePolicy().setWindowSizeIncrement(0.2) .setWindowSizeDecrementFactor(5) .setResizeRate(10) .setMinWindowSize(1)); parameters.setRemoteDataHandler(cluster.name()); parameters.setMaxPending(32); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.NORMAL_3); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); parameters.setMaxBucketsPerVisitor(1); parameters.setVisitorLibrary("ReindexingVisitor"); return parameters; } static class Cluster { private final String name; private final Map<DocumentType, String> documentBuckets; Cluster(String name, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Content:cluster=" + name + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", documentBuckets=" + documentBuckets + '}'; } 
} }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); static final Duration failureGrace = Duration.ofMinutes(10); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final Function<VisitorParameters, Runnable> visitorSessions; private final ReindexingMetrics metrics; private final Clock clock; private final Phaser phaser = new Phaser(2); public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Metric metric, Clock clock) { this(cluster, ready, database, parameters -> { try { return access.createVisitorSession(parameters)::destroy; } catch (ParseException e) { throw new IllegalStateException(e); } }, metric, clock ); } Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, Function<VisitorParameters, Runnable> visitorSessions, Metric metric, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.visitorSessions = visitorSessions; this.metrics = new ReindexingMetrics(metric, cluster.name); this.clock = clock; database.initializeIfEmpty(cluster.name, ready, clock.instant()); } /** Lets the reindexer abort any ongoing visit session, wait for it to complete normally, then exit. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. 
*/ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); AtomicReference<Reindexing> reindexing = new AtomicReference<>(database.readReindexing(cluster.name())); database.writeReindexing(reindexing.get(), cluster.name()); metrics.dump(reindexing.get()); try (Lock lock = database.lockReindexing(cluster.name())) { reindexing.set(updateWithReady(ready, reindexing.get(), clock.instant())); database.writeReindexing(reindexing.get(), cluster.name()); metrics.dump(reindexing.get()); for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type, reindexing, new AtomicReference<>(reindexing.get().status().get(type))); if (phaser.isTerminated()) break; } } } static Reindexing updateWithReady(Map<DocumentType, Instant> ready, Reindexing reindexing, Instant now) { for (DocumentType type : ready.keySet()) { if ( ! 
ready.get(type).isAfter(now)) { Status status = reindexing.status().getOrDefault(type, Status.ready(now)); if (status.startedAt().isBefore(ready.get(type))) status = Status.ready(now); reindexing = reindexing.with(type, status); } } return reindexing; } @SuppressWarnings("fallthrough") VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setThrottlePolicy(new DynamicThrottlePolicy().setWindowSizeIncrement(0.2) .setWindowSizeDecrementFactor(5) .setResizeRate(10) .setMinWindowSize(1)); parameters.setRemoteDataHandler(cluster.name()); parameters.setMaxPending(32); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.NORMAL_3); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); parameters.setMaxBucketsPerVisitor(1); parameters.setVisitorLibrary("ReindexingVisitor"); return parameters; } static class Cluster { private final String name; private final Map<DocumentType, String> documentBuckets; Cluster(String name, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Content:cluster=" + name + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", documentBuckets=" + documentBuckets + '}'; } 
} }
Fixed
/** Creates and returns a runner that starts a ZooKeeper server with the given config. */
private ZooKeeperRunner startServer(ZookeeperServerConfig config) {
    return new ZooKeeperRunner(config);
}
/** Starts a ZooKeeper server by constructing its runner from the given config. */
private ZooKeeperRunner startServer(ZookeeperServerConfig serverConfig) {
    return new ZooKeeperRunner(serverConfig);
}
class Reconfigurer extends AbstractComponent { private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName()); private ZooKeeperRunner zooKeeperRunner; @Inject public Reconfigurer() { log.log(Level.FINE, "Created ZooKeeperReconfigurer"); } void startOrReconfigure(ZookeeperServerConfig newConfig) { if (zooKeeperRunner == null) zooKeeperRunner = startServer(newConfig); if (shouldReconfigure(newConfig)) reconfigure(newConfig); } boolean shouldReconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig(); if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false; return !newConfig.equals(existingConfig); } void reconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig(); List<String> originalServers = List.copyOf(servers(existingConfig)); log.log(Level.INFO, "Original servers: " + originalServers); List<String> joiningServers = servers(newConfig); List<String> leavingServers = setDifference(originalServers, joiningServers); List<String> addedServers = setDifference(joiningServers, leavingServers); log.log(Level.INFO, "Will reconfigure zookeeper cluster. 
Joining servers: " + joiningServers + ", leaving servers: " + leavingServers + ", new members" + addedServers); int sessionTimeoutInSeconds = 30; try { ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(existingConfig), sessionTimeoutInSeconds, null); long fromConfig = -1; zooKeeperAdmin.reconfigure(joiningServers, originalServers, addedServers, fromConfig, null); } catch (IOException | KeeperException | InterruptedException e) { throw new RuntimeException(e); } } List<String> currentServers() { if (zooKeeperRunner == null) return List.of(); return servers(zooKeeperRunner.zookeeperServerConfig()); } List<String> setDifference(List<String> a, List<String> b) { Set<String> ret = new HashSet<>(a); ret.removeAll(b); return new ArrayList<>(ret); } private String connectionSpec(ZookeeperServerConfig config) { return String.join(",", servers(config)); } private List<String> servers(ZookeeperServerConfig config) { return config.server().stream() .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort()) .collect(Collectors.toList()); } }
class Reconfigurer extends AbstractComponent { private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName()); private static final int sessionTimeoutInSeconds = 30; private ZooKeeperRunner zooKeeperRunner; @Inject public Reconfigurer() { log.log(Level.FINE, "Created ZooKeeperReconfigurer"); } void startOrReconfigure(ZookeeperServerConfig newConfig) { if (zooKeeperRunner == null) zooKeeperRunner = startServer(newConfig); if (shouldReconfigure(newConfig)) reconfigure(newConfig); } boolean shouldReconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig(); if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false; return !newConfig.equals(existingConfig); } void reconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig(); List<String> originalServers = List.copyOf(servers(existingConfig)); log.log(Level.INFO, "Original servers: " + originalServers); List<String> joiningServers = servers(newConfig); List<String> leavingServers = setDifference(originalServers, joiningServers); List<String> addedServers = setDifference(joiningServers, leavingServers); log.log(Level.INFO, "Will reconfigure zookeeper cluster. 
Joining servers: " + joiningServers + ", leaving servers: " + leavingServers + ", new members" + addedServers); try { ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(existingConfig), sessionTimeoutInSeconds, null); long fromConfig = -1; zooKeeperAdmin.reconfigure(joiningServers, originalServers, addedServers, fromConfig, null); } catch (IOException | KeeperException | InterruptedException e) { throw new RuntimeException(e); } } /** * Returns items in set a that are not in set b */ List<String> setDifference(List<String> a, List<String> b) { Set<String> ret = new HashSet<>(a); ret.removeAll(b); return new ArrayList<>(ret); } private String connectionSpec(ZookeeperServerConfig config) { return String.join(",", servers(config)); } private List<String> servers(ZookeeperServerConfig config) { return config.server().stream() .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort()) .collect(Collectors.toList()); } }
Done
/**
 * Reconfigures the running ZooKeeper cluster to match the given config, using
 * ZooKeeper's dynamic reconfiguration API.
 *
 * @param newConfig the desired server config
 * @throws RuntimeException wrapping any connection or reconfiguration failure
 */
void reconfigure(ZookeeperServerConfig newConfig) {
    ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig();
    List<String> originalServers = List.copyOf(servers(existingConfig));
    log.log(Level.INFO, "Original servers: " + originalServers);
    List<String> joiningServers = servers(newConfig);
    List<String> leavingServers = setDifference(originalServers, joiningServers);
    List<String> addedServers = setDifference(joiningServers, leavingServers);
    log.log(Level.INFO, "Will reconfigure zookeeper cluster. Joining servers: " + joiningServers +
                        ", leaving servers: " + leavingServers + ", new members: " + addedServers);
    // NOTE(review): the computed leavingServers is not passed to reconfigure() below;
    // originalServers is used as the 'leaving' argument instead — confirm this is intended.
    int sessionTimeoutInSeconds = 30; // ZooKeeper client session timeout for the admin connection
    // try-with-resources closes the admin connection so the client session is not leaked.
    try (ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(existingConfig), sessionTimeoutInSeconds, null)) {
        long fromConfig = -1; // -1: do not condition the reconfiguration on the current config version
        zooKeeperAdmin.reconfigure(joiningServers, originalServers, addedServers, fromConfig, null);
    } catch (IOException | KeeperException | InterruptedException e) {
        throw new RuntimeException(e);
    }
}
int sessionTimeoutInSeconds = 30; // ZooKeeper client session timeout, in seconds, for the admin connection
/**
 * Reconfigures the running ZooKeeper cluster to match the given config, using
 * ZooKeeper's dynamic reconfiguration API.
 *
 * @param newConfig the desired server config
 * @throws RuntimeException wrapping any connection or reconfiguration failure
 */
void reconfigure(ZookeeperServerConfig newConfig) {
    ZookeeperServerConfig existingConfig = zooKeeperRunner.zookeeperServerConfig();
    List<String> originalServers = List.copyOf(servers(existingConfig));
    log.log(Level.INFO, "Original servers: " + originalServers);
    List<String> joiningServers = servers(newConfig);
    List<String> leavingServers = setDifference(originalServers, joiningServers);
    List<String> addedServers = setDifference(joiningServers, leavingServers);
    log.log(Level.INFO, "Will reconfigure zookeeper cluster. Joining servers: " + joiningServers +
                        ", leaving servers: " + leavingServers + ", new members: " + addedServers);
    // NOTE(review): the computed leavingServers is not passed to reconfigure() below;
    // originalServers is used as the 'leaving' argument instead — confirm this is intended.
    // try-with-resources closes the admin connection so the client session is not leaked.
    try (ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(existingConfig), sessionTimeoutInSeconds, null)) {
        long fromConfig = -1; // -1: do not condition the reconfiguration on the current config version
        zooKeeperAdmin.reconfigure(joiningServers, originalServers, addedServers, fromConfig, null);
    } catch (IOException | KeeperException | InterruptedException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Starts a ZooKeeper server on first use and triggers dynamic reconfiguration
 * of the cluster when the server config changes.
 */
class Reconfigurer extends AbstractComponent {

    private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName());

    private ZooKeeperRunner zooKeeperRunner;

    @Inject
    public Reconfigurer() {
        log.log(Level.FINE, "Created ZooKeeperReconfigurer");
    }

    /** Starts the server if it is not already running, then reconfigures if the config changed. */
    void startOrReconfigure(ZookeeperServerConfig newConfig) {
        if (zooKeeperRunner == null)
            zooKeeperRunner = startServer(newConfig);
        if (shouldReconfigure(newConfig))
            reconfigure(newConfig);
    }

    /** Returns whether the given config both enables dynamic reconfiguration and differs from the active one. */
    boolean shouldReconfigure(ZookeeperServerConfig newConfig) {
        ZookeeperServerConfig activeConfig = zooKeeperRunner.zookeeperServerConfig();
        if (activeConfig == null || !newConfig.dynamicReconfiguration())
            return false;
        return !newConfig.equals(activeConfig);
    }

    /** Creates and returns a runner that starts a ZooKeeper server with the given config. */
    private ZooKeeperRunner startServer(ZookeeperServerConfig zookeeperServerConfig) {
        return new ZooKeeperRunner(zookeeperServerConfig);
    }

    /** Returns the servers of the active config, or an empty list if no server has been started. */
    List<String> currentServers() {
        return zooKeeperRunner == null
                ? List.of()
                : servers(zooKeeperRunner.zookeeperServerConfig());
    }

    /** Returns the items of a that are not in b (set difference a \ b). */
    List<String> setDifference(List<String> a, List<String> b) {
        Set<String> difference = new HashSet<>(a);
        difference.removeAll(b);
        return new ArrayList<>(difference);
    }

    /** Returns a comma-separated connection spec for all servers in the given config. */
    private String connectionSpec(ZookeeperServerConfig config) {
        return String.join(",", servers(config));
    }

    /** Returns "hostname:quorumPort:electionPort" for each server in the given config. */
    private List<String> servers(ZookeeperServerConfig config) {
        List<String> specs = new ArrayList<>();
        for (var server : config.server())
            specs.add(server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort());
        return specs;
    }

}
/**
 * Starts a ZooKeeper server on first use and triggers dynamic reconfiguration
 * of the cluster when the server config changes.
 */
class Reconfigurer extends AbstractComponent {

    private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName());

    // Session timeout, in seconds, for connections made during reconfiguration
    private static final int sessionTimeoutInSeconds = 30;

    private ZooKeeperRunner zooKeeperRunner;

    @Inject
    public Reconfigurer() {
        log.log(Level.FINE, "Created ZooKeeperReconfigurer");
    }

    /** Starts the server if it is not already running, then reconfigures if the config changed. */
    void startOrReconfigure(ZookeeperServerConfig newConfig) {
        if (zooKeeperRunner == null)
            zooKeeperRunner = startServer(newConfig);
        if (shouldReconfigure(newConfig))
            reconfigure(newConfig);
    }

    /** Returns whether the given config both enables dynamic reconfiguration and differs from the active one. */
    boolean shouldReconfigure(ZookeeperServerConfig newConfig) {
        ZookeeperServerConfig activeConfig = zooKeeperRunner.zookeeperServerConfig();
        if (activeConfig == null || !newConfig.dynamicReconfiguration())
            return false;
        return !newConfig.equals(activeConfig);
    }

    /** Creates and returns a runner that starts a ZooKeeper server with the given config. */
    private ZooKeeperRunner startServer(ZookeeperServerConfig zookeeperServerConfig) {
        return new ZooKeeperRunner(zookeeperServerConfig);
    }

    /**
     * Returns items in set a that are not in set b
     */
    List<String> setDifference(List<String> a, List<String> b) {
        Set<String> difference = new HashSet<>(a);
        difference.removeAll(b);
        return new ArrayList<>(difference);
    }

    /** Returns a comma-separated connection spec for all servers in the given config. */
    private String connectionSpec(ZookeeperServerConfig config) {
        return String.join(",", servers(config));
    }

    /** Returns "hostname:quorumPort:electionPort" for each server in the given config. */
    private List<String> servers(ZookeeperServerConfig config) {
        List<String> specs = new ArrayList<>();
        for (var server : config.server())
            specs.add(server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort());
        return specs;
    }

}
Okay — so the 'oldest' tracking is there in case we're downgrading. It might be good to add some more explanation here, but at least I agree this does not look wrong.
/**
 * Returns the oldest platform version targeted by this job's runs, going back through
 * run history until (and including) the first successful run encountered; falls back
 * to the overload taking the job id when no successful run is found.
 */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
    Version lowest = null;
    for (Run run : job.runs().descendingMap().values()) {
        Version target = run.versions().targetPlatform();
        if (lowest == null || target.isBefore(lowest))
            lowest = target;
        // Tracking the minimum (not just the last) covers downgrades — TODO confirm.
        if (run.status() == RunStatus.success)
            return Optional.of(lowest);
    }
    // No successful run in history: resolve from the job id instead.
    return oldestInstalledPlatform(job.id());
}
if (run.status() == RunStatus.success) // stop at the first successful run encountered
/**
 * Finds the minimum target platform among this job's runs, scanning run history
 * until a successful run is reached; delegates to the job-id overload when the
 * history contains no successful run.
 */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
    Optional<Version> lowestSoFar = Optional.empty();
    for (Run run : job.runs().descendingMap().values()) {
        Version candidate = run.versions().targetPlatform();
        if (lowestSoFar.isEmpty() || candidate.isBefore(lowestSoFar.get()))
            lowestSoFar = Optional.of(candidate);
        // The minimum (rather than the latest) is kept, presumably to handle downgrades.
        if (run.status() == RunStatus.success)
            return lowestSoFar;
    }
    return oldestInstalledPlatform(job.id());
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificateManager endpointCertificateManager; private final StringFlag dockerImageRepoFlag; private final BooleanFlag provisionApplicationRoles; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, SecretStore secretStore, FlagSource flagSource, BillingController billingController) { this.controller = controller; this.curator = curator; this.accessControl = accessControl; this.configServer = controller.serviceRegistry().configServer(); this.clock = clock; this.artifactRepository = controller.serviceRegistry().artifactRepository(); this.applicationStore = controller.serviceRegistry().applicationStore(); this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource); this.billingController = billingController; deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore, controller.serviceRegistry().endpointCertificateProvider(), clock, flagSource); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : 
curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)) .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId)); } /** Enables reindexing for the given application in the given zone. 
*/ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. */ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all content clusters in all current deployments of the given application. 
*/ public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), job.application(), EnumSet.of(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) { if (id.instance().isTester()) throw new IllegalArgumentException("'" + id + "' is a tester application!"); InstanceId.validate(id.instance().value()); if (getInstance(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists"); if (getInstance(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists"); log.info("Created " + id); return application.withNewInstance(id.instance()); } public ActivateResult deploy(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options); } /** Deploys an application package for an existing application instance. 
*/ public ActivateResult deploy2(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Optional<ApplicationRoles> applicationRoles = Optional.empty(); Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || revision.compareTo(deployment.applicationVersion()) < 0 && ! 
(revision.isUnknown() && controller.system().isCd()))) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion())); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name())); endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone); if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) { try { applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id()); } catch (Exception e) { log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e); throw new RuntimeException("Unable to provision iam roles for application"); } } } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints, endpointCertificateMetadata, applicationRoles); var quotaUsage = deploymentQuotaUsage(zone, job.application()); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } private 
ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) { return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone) : applicationStore.get(application.tenant(), application.application(), revision)); } public ActivateResult deploy(ApplicationId instanceId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, Optional<ApplicationVersion> applicationVersionFromDeployer, DeployOptions options) { if (instanceId.instance().isTester()) throw new IllegalArgumentException("'" + instanceId + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId); if (getInstance(instanceId).isEmpty()) createInstance(instanceId); try (Lock deploymentLock = lockForDeployment(instanceId, zone)) { Version platformVersion; ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); InstanceName instance = instanceId.instance(); boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed(); boolean preferOldestVersion = options.deployCurrentVersion; if (manuallyDeployed) { applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown); applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package must be given when deploying to " + zone)); platformVersion = options.vespaVersion.map(Version::new) .orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(this::lastCompatibleVersion) .orElseGet(controller::readSystemVersion)); } else { JobType jobType = JobType.from(controller.system(), zone) .orElseThrow(() -> new IllegalArgumentException("No job is known for " 
+ zone + ".")); var run = controller.jobController().last(instanceId, jobType); if (run.map(Run::hasEnded).orElse(true)) return unexpectedDeployment(instanceId, zone); Versions versions = run.get().versions(); platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); applicationPackage = getApplicationPackage(instanceId, applicationVersion); applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType); validateRun(application.get().require(instance), zone, platformVersion, applicationVersion); } endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata( application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance)); endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone); } ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints, endpointCertificateMetadata, Optional.empty()); var quotaUsage = deploymentQuotaUsage(zone, instanceId); lockApplicationOrThrow(applicationId, application -> store(application.with(instanceId.instance(), instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) { if (applicationPackage.trustedCertificates().isEmpty()) return applicationPackage; Run run = controller.jobController().last(id, type) .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found")); if (run.testerCertificate().isEmpty()) return applicationPackage; return applicationPackage.withTrustedCertificate(run.testerCertificate().get()); } /** Fetches the 
requested application package from the artifact store(s). */ public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); } /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */ public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } for (InstanceName instance : declaredInstances) if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod)) application = controller.routing().assignRotations(application, instance); store(application); return application; } /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( 
            artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
            );
            return deploy(application.id(), applicationPackage, zone, version, Set.of(),
                          /* No application cert */ Optional.empty(), Optional.empty());
        } else {
            throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
        }
    }

    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(),
                      /* No application cert for tester*/ Optional.empty(), Optional.empty());
    }

    /**
     * Deploys the given application package to the given zone through the config server,
     * and refreshes routing policies for the zone regardless of the outcome.
     */
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                                  ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  Optional<ApplicationRoles> applicationRoles) {
        try {
            // Optional override of the docker image repository, controlled by feature flag per zone/application.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);

            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant-> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant)tenant).domain());

            // Manual deployments have no job run to carry the package metadata, so store it here.
            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                     clock.instant(),
                                                                     applicationPackage.metaDataZip());

            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                    asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());

            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints,
                                                           endpointCertificateMetadata, dockerImageRepo, domain,
                                                           applicationRoles, deploymentQuota));

            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Refresh routing on both success and failure, so policies stay consistent with the config servers.
            controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
        }
    }

    /** Builds a synthetic "deployment ignored" result, used when no job run expects a deployment now. */
    private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = List.of(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of());
        return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
    }

    /**
     * Deactivates any production deployments of the given instance which are no longer declared in the
     * deployment spec, and removes the instance itself when it is undeclared and has no deployments left.
     * Removal requires a valid deployment-removal validation override.
     */
    private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
        DeploymentSpec deploymentSpec = application.get().deploymentSpec();
        List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                      .map(Deployment::zone)
                                                      .filter(zone ->      deploymentSpec.instance(instance).isEmpty()
                                                                      || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region()))
                                                      .collect(toList());

        if (deploymentsToRemove.isEmpty()) return application;

        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(zone -> zone.region().value())
                                                                  .collect(joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " +
                                               ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));

        // Remove the instance as well when it is no longer declared and all its deployments are going away.
        boolean removeInstance =    ! deploymentSpec.instanceNames().contains(instance)
                                 &&   application.get().require(instance).deployments().size() == deploymentsToRemove.size();

        for (ZoneId zone : deploymentsToRemove)
            application = deactivate(application, instance, zone);

        if (removeInstance)
            application = application.without(instance);

        return application;
    }

    /**
     * Deletes the given application. All known instances of the application will be deleted.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
        lockApplicationOrThrow(id, application -> {
            var deployments = application.get().instances().values().stream()
                                         .filter(instance -> ! instance.deployments().isEmpty())
                                         .collect(toMap(instance -> instance.name(),
                                                        instance -> instance.deployments().keySet().stream()
                                                                            .map(ZoneId::toString)
                                                                            .collect(joining(", "))));
            if ( ! deployments.isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);

            for (Instance instance : application.get().instances().values()) {
                controller.routing().removeEndpointsInDns(application.get(), instance.name());
                application = application.without(instance.name());
            }

            applicationStore.removeAll(id.tenant(), id.application());
            applicationStore.removeAllTesters(id.tenant(), id.application());
            applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());

            accessControl.deleteApplication(id, credentials);
            curator.removeApplication(id);

            controller.jobController().collectGarbage();
            log.info("Deleted " + id);
        });
    }

    /**
     * Deletes the given application instance.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the instance does not exist
     */
    public void deleteInstance(ApplicationId instanceId) {
        if (getInstance(instanceId).isEmpty())
            throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");

        lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
            if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                                   application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                              .sorted().collect(joining(", ")));
            // Instances declared in deployment.xml must be removed from the spec before they can be deleted here.
            if (   ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
                &&   application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
                throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");

            controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
            curator.writeApplication(application.without(instanceId.instance()).get());
            controller.jobController().collectGarbage();
            log.info("Deleted " + instanceId);
        });
    }

    /**
     * Replaces any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        curator.writeApplication(application.get());
    }

    /**
     * Acquires a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquires a locked application to modify and store, or throws an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(requireApplication(applicationId), lock));
        }
    }

    /**
     * Tells the config server to schedule a restart of the nodes in this deployment matched by the given filter.
     *
     * @param restartFilter Variables to filter which nodes to restart.
     */
    public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
        configServer.restart(deploymentId, restartFilter);
    }

    /**
     * Asks the config server whether this deployment is currently <i>suspended</i>:
     * Not in a state where it should receive traffic.
     */
    public boolean isSuspended(DeploymentId deploymentId) {
        try {
            return configServer.isSuspended(deploymentId);
        } catch (ConfigServerException e) {
            // A deployment unknown to the config server is reported as not suspended.
            if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
                return false;
            throw e;
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(ApplicationId id, ZoneId zone) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id),
                               application -> store(deactivate(application, id.instance(), zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
        DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
        try {
            configServer.deactivate(id);
        } catch (NotFoundException ignored) {
            // Already gone on the config server; treat as successfully deactivated.
        } finally {
            // Refresh routing whether or not deactivation succeeded.
            controller.routing().policies().refresh(application.get().id().instance(instanceName),
                                                    application.get().deploymentSpec(),
                                                    zone);
            if (zone.environment().isManuallyDeployed())
                applicationStore.putMetaTombstone(id, clock.instant());
        }
        return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    // Maps dashes to underscores, used to detect names which would collide — see createApplication/withNewInstance.
    private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
        return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
    }

    private ApplicationId dashToUnderscore(ApplicationId id) {
        return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
    }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(TenantAndApplicationId application) {
        return curator.lock(application);
    }

    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        return curator.lockForDeployment(application, zone);
    }

    /** Verify that we don't downgrade an existing production deployment. */
    private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
        Deployment deployment = instance.deployments().get(zone);
        // A pinned change may deploy an older platform; an older application version is always rejected.
        if (   zone.environment().isProduction()
            && deployment != null
            && (   platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned()
                || applicationVersion.compareTo(deployment.applicationVersion()) < 0))
            throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                             " are older than the currently deployed (platform: %s, application: %s).",
                                                             instance.id(), zone, platformVersion, applicationVersion,
                                                             deployment.version(), deployment.applicationVersion()));
    }

    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3.
If the principal is not given, verify that the Athenz domain of the tenant equals the Athenz domain given in deployment.xml
     *
     * @param tenantName tenant where application should be deployed
     * @param applicationPackage application package
     * @param deployer principal initiating the deployment, possibly empty
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId,
                                                       ApplicationPackage applicationPackage, Optional<Principal> deployer) {
        Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                                  .map(domain -> new AthenzDomain(domain.value()));
        if (identityDomain.isEmpty()) {
            // No Athenz identity configured in the package; nothing to verify.
            return;
        }

        if (! (accessControl instanceof AthenzFacade)) {
            throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
        }

        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());

        // A user principal initiated the deployment: check launch rights for that user.
        Optional<AthenzUser> athenzUser = getUser(deployer);
        if (athenzUser.isPresent()) {
            var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
            var serviceToLaunch = instanceName
                    .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> applicationPackage.deploymentSpec().athenzService())
                    .map(service -> new AthenzService(identityDomain.get(), service.value()));

            if (serviceToLaunch.isPresent()) {
                if (   ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get())
                    && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())) {
                    throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                       "service " + serviceToLaunch.get().getFullName() + ". " +
                                                       "Please reach out to the domain admin.");
                }
            } else {
                // Launching a service is only possible if the service name is explicitly configured.
                throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
            }
        } else {
            // No user principal: require that the tenant's Athenz domain matches the domain in deployment.xml.
            Tenant tenant = controller.tenants().require(tenantName);
            AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
            if ( ! Objects.equals(tenantDomain, identityDomain.get()))
                throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                                   "must match tenant domain: [" + tenantDomain.getName() + "]");
        }
    }

    /*
     * Returns the AthenzUser from this principal, or Optional.empty if it does not represent a user.
     */
    private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
        return deployer
                .filter(AthenzPrincipal.class::isInstance)
                .map(AthenzPrincipal.class::cast)
                .map(AthenzPrincipal::getIdentity)
                .filter(AthenzUser.class::isInstance)
                .map(AthenzUser.class::cast);
    }

    /*
     * Verifies that the configured athenz service (if any) can be launched by the config server of every reachable zone.
     */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(domain -> {
            controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
                AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
                // Check both the application-level service and any per-instance service overrides.
                deploymentSpec.athenzService().ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
                deploymentSpec.instances().forEach(spec -> {
                    spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                    });
                });
            });
        });
    }

    private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
        if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
            throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
    }

    /** Returns the latest known version within the given major. */
    public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
        return controller.readVersionStatus().versions().stream()
                         .map(VespaVersion::versionNumber)
                         .filter(version -> version.getMajor() == targetMajorVersion)
                         .max(naturalOrder());
    }

    /** Extracts the deployment warnings metric from a deployment result. */
    private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
        if (result.prepareResponse().log == null) return Map.of();
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        for (Log log : result.prepareResponse().log) {
            // Count only entries logged at warning level.
            if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
            warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
        }
        return Map.copyOf(warnings);
    }

}
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For persistence */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificateManager endpointCertificateManager;
    private final StringFlag dockerImageRepoFlag;
    private final BooleanFlag provisionApplicationRoles;
    private final BillingController billingController;

    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          SecretStore secretStore, FlagSource flagSource, BillingController billingController) {
        this.controller = controller;
        this.curator = curator;
        this.accessControl = accessControl;
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = clock;
        this.artifactRepository = controller.serviceRegistry().artifactRepository();
        this.applicationStore = controller.serviceRegistry().applicationStore();
        this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource);
        this.billingController = billingController;

        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore,
                                                                    controller.serviceRegistry().endpointCertificateProvider(), clock, flagSource);

        // Once, shortly after startup: rewrite every stored application, creating any instances which are
        // declared in the deployment spec but missing from the store.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if (!application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++;
            }
            log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant())));
        });
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }

    /** Returns the instance with the given id, or empty if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }

    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no documents types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes);
    }

    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId))
                           .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId));
    }

    /** Enables reindexing for the given application in the given zone. */
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }

    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return curator.readApplications(false);
    }

    /**
     * Returns a snapshot of all readable applications. Unlike {@link ApplicationController#asList()} this skips
     * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
     * snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() {
        return curator.readApplications(true);
    }

    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() {
        return curator.readApplicationIds();
    }

    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return curator.readApplications(tenant);
    }

    public ArtifactRepository artifacts() { return artifactRepository; }

    public ApplicationStore applicationStore() { return applicationStore; }

    /** Returns all content clusters in all current deployments of the given application. */
    public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) {
        Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
        for (DeploymentId id : ids)
            clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
        return Collections.unmodifiableMap(clusters);
    }

    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  job.application(),
                                                  EnumSet.of(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty())
                           .min(naturalOrder());
    }

    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         // No production nodes report a version: fall back to the current system version.
                         .orElse(controller.readSystemVersion());
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Names differing only in '-' vs '_' are considered conflicting.
            if (getApplication(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            accessControl.createApplication(id, credentials);

            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }

    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
     */
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }

    public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) {
        if (id.instance().isTester())
            throw new IllegalArgumentException("'" + id + "' is a tester application!");
        InstanceId.validate(id.instance().value());

        if (getInstance(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists");
        // Names differing only in '-' vs '_' are considered conflicting.
        if (getInstance(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists");

        log.info("Created " + id);
        return application.withNewInstance(id.instance());
    }

    public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                                 Optional<ApplicationPackage> applicationPackageFromDeployer,
                                 DeployOptions options) {
        return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options);
    }

    /** Deploys an application package for an existing application instance. */
    public ActivateResult deploy2(JobId job, boolean deploySourceVersions) {
        if (job.application().instance().isTester())
            throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
        ZoneId zone = job.type().zone(controller.system());

        try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
            Set<ContainerEndpoint> endpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
            Optional<ApplicationRoles> applicationRoles = Optional.empty();

            Run run = controller.jobController().last(job)
                                .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));

            if (run.hasEnded())
                throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

            Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
            ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
            ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);

            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                Instance instance = application.get().require(job.application().instance());

                Deployment deployment = instance.deployments().get(zone);
                // Reject production downgrades, unless the change is pinned (platform) or the revision
                // is unknown in a CD system (application).
                if (   zone.environment().isProduction() && deployment != null
                    && (   platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned()
                        || revision.compareTo(deployment.applicationVersion()) < 0 && ! (revision.isUnknown() && controller.system().isCd())))
                    throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                                     " are older than the currently deployed (platform: %s, application: %s).",
                                                                     job.application(), zone, platform, revision,
                                                                     deployment.version(), deployment.applicationVersion()));

                if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                    applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

                endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));

                endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone);

                if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) {
                    try {
                        applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id());
                    } catch (Exception e) {
                        log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e);
                        throw new RuntimeException("Unable to provision iam roles for application");
                    }
                }
            } // The application lock is released while carrying out the deployment; the deployment lock is still held.

            ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints,
                                           endpointCertificateMetadata, applicationRoles);

            var quotaUsage = deploymentQuotaUsage(zone, job.application());

            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(job.application().instance(),
                                           instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(),
                                                                                  warningsFrom(result), quotaUsage))));
            return result;
        }
    }

    /** Returns the quota usage reported by the node repository for the given deployment. */
    private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
        var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
        return DeploymentQuotaCalculator.calculateQuotaUsage(application);
    }

    /** Fetches the package for the given revision: the dev store for unknown revisions, the regular store otherwise. */
    private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
        return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                           : applicationStore.get(application.tenant(), application.application(), revision));
    }

    public ActivateResult deploy(ApplicationId instanceId, ZoneId zone,
                                 Optional<ApplicationPackage> applicationPackageFromDeployer,
                                 Optional<ApplicationVersion> applicationVersionFromDeployer,
                                 DeployOptions options) {
        if (instanceId.instance().isTester())
            throw new IllegalArgumentException("'" + instanceId + "' is a tester application!");
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId);
        if (getInstance(instanceId).isEmpty())
            createInstance(instanceId);

        try (Lock deploymentLock = lockForDeployment(instanceId, zone)) {
            Version platformVersion;
            ApplicationVersion applicationVersion;
            ApplicationPackage applicationPackage;
            Set<ContainerEndpoint> endpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;

            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                InstanceName instance = instanceId.instance();

                boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
                boolean preferOldestVersion = options.deployCurrentVersion;

                if (manuallyDeployed) {
                    // Manual deployment: the package must come from the deployer; the platform may be given
                    // explicitly, or is derived from the declared major version, or defaults to the system version.
                    applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
                    applicationPackage = applicationPackageFromDeployer.orElseThrow(
                            () -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
                    platformVersion = options.vespaVersion.map(Version::new)
                                                          .orElse(applicationPackage.deploymentSpec().majorVersion()
                                                                                    .flatMap(this::lastCompatibleVersion)
                                                                                    .orElseGet(controller::readSystemVersion));
                } else {
                    // Job-driven deployment: versions come from the last run of the job for this zone.
                    JobType jobType = JobType.from(controller.system(), zone)
                                             .orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
                    var run = controller.jobController().last(instanceId, jobType);
                    if (run.map(Run::hasEnded).orElse(true))
                        return unexpectedDeployment(instanceId, zone);
                    Versions versions = run.get().versions();
                    platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform())
                                                          : versions.targetPlatform();
                    applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication())
                                                             : versions.targetApplication();

                    applicationPackage = getApplicationPackage(instanceId, applicationVersion);
                    applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType);
                    validateRun(application.get().require(instance), zone, platformVersion, applicationVersion);
                }

                endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(
                        application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance));

                endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone);
            } // The application lock is released while carrying out the deployment; the deployment lock is still held.

            ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints,
                                           endpointCertificateMetadata, Optional.empty());
            var quotaUsage = deploymentQuotaUsage(zone, instanceId);
            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(instanceId.instance(),
                                           instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion,
                                                                                  clock.instant(), warningsFrom(result),
                                                                                  quotaUsage))));
            return result;
        }
    }

    /** Adds the tester certificate of the job's last run, if any, to the package's trusted certificates. */
    private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) {
        if (applicationPackage.trustedCertificates().isEmpty()) return applicationPackage;

        Run run = controller.jobController().last(id, type)
                            .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found"));
        if (run.testerCertificate().isEmpty()) return applicationPackage;

        return applicationPackage.withTrustedCertificate(run.testerCertificate().get());
    }

    /** Fetches the
requested application package from the artifact store(s). */
    public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
        return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
    }

    /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
    public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
        applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());

        application = application.with(applicationPackage.deploymentSpec());
        application = application.with(applicationPackage.validationOverrides());

        var existingInstances = application.get().instances().keySet();
        var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
        // Create any instances declared in the spec which do not yet exist.
        for (var name : declaredInstances)
            if ( ! existingInstances.contains(name))
                application = withNewInstance(application, application.get().id().instance(name));

        // Remove deployments (and instances) no longer declared by the spec.
        for (InstanceName name : existingInstances) {
            application = withoutDeletedDeployments(application, name);
        }

        // Assign rotations to declared instances with production deployments.
        for (InstanceName instance : declaredInstances)
            if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
                application = controller.routing().assignRotations(application, instance);

        store(application);
        return application;
    }

    /** Deploy a system application to given zone */
    public void deploy(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            deploySystemApplicationPackage(application, zone, version);
        } else {
            // System applications without a package are upgraded directly through the node repository.
            configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
        }
    }

    /** Deploy a system application to given zone */
    public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            ApplicationPackage applicationPackage = new ApplicationPackage(
                    artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
            );
            return deploy(application.id(), applicationPackage, zone, version, Set.of(),
                          /* No application cert */ Optional.empty(), Optional.empty());
        } else {
            throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
        }
    }

    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(),
                      /* No application cert for tester*/ Optional.empty(), Optional.empty());
    }

    /**
     * Deploys the given application package to the given zone through the config server,
     * and refreshes routing policies for the zone regardless of the outcome.
     */
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                                  ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  Optional<ApplicationRoles> applicationRoles) {
        try {
            // Optional override of the docker image repository, controlled by feature flag per zone/application.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);

            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant-> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant)tenant).domain());

            // Manual deployments have no job run to carry the package metadata, so store it here.
            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                     clock.instant(),
                                                                     applicationPackage.metaDataZip());

            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                    asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());

            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints,
endpointCertificateMetadata, dockerImageRepo, domain, applicationRoles, deploymentQuota)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); } } private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = List.of(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of()); return new ActivateResult(new RevisionId("0"), prepareResponse, 0); } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); accessControl.deleteApplication(id, credentials); curator.removeApplication(id); controller.jobController().collectGarbage(); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. 
*/ public boolean isSuspended(DeploymentId deploymentId) { try { return configServer.isSuspended(deploymentId); } catch (ConfigServerException e) { if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND) return false; throw e; } } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. 
*/ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** Verify that we don't downgrade an existing production deployment. */ private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) { Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || applicationVersion.compareTo(deployment.applicationVersion()) < 0)) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", instance.id(), zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion())); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. 
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. */ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! 
((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { return controller.readVersionStatus().versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
Good point, added
public void testStartupAndReconfigure() { Reconfigurer reconfigurer = new Reconfigurer(); reconfigurer.startOrReconfigure(createConfig(1)); assertFalse(reconfigurer.shouldReconfigure(createConfig(2))); assertTrue(reconfigurer.shouldReconfigure(createConfigAllowReconfiguring(2))); }
assertTrue(reconfigurer.shouldReconfigure(createConfigAllowReconfiguring(2)));
public void testStartupAndReconfigure() { Reconfigurer reconfigurer = new Reconfigurer(); reconfigurer.startOrReconfigure(createConfig(1)); assertFalse(reconfigurer.shouldReconfigure(createConfig(2))); assertTrue(reconfigurer.shouldReconfigure(createConfigAllowReconfiguring(2))); Reconfigurer reconfigurer2 = new Reconfigurer(); reconfigurer2.startOrReconfigure(createConfigAllowReconfiguring(1)); assertFalse(reconfigurer2.shouldReconfigure(createConfigAllowReconfiguring(1))); }
class ReconfigurerTest { private File cfgFile; private File idFile; @Rule public TemporaryFolder folder = new TemporaryFolder(); @Before public void setup() throws IOException { cfgFile = folder.newFile(); idFile = folder.newFile("myid"); } @Test private ZookeeperServerConfig createConfigAllowReconfiguring(int numberOfServers) { return createConfig(numberOfServers, true); } private ZookeeperServerConfig createConfig(int numberOfServers) { return createConfig(numberOfServers, false); } private ZookeeperServerConfig createConfig(int numberOfServers, boolean dynamicReconfiguration) { ZookeeperServerConfig.Builder builder = new ZookeeperServerConfig.Builder(); builder.zooKeeperConfigFile(cfgFile.getAbsolutePath()); builder.myidFile(idFile.getAbsolutePath()); IntStream.range(0, numberOfServers).forEach(i -> { builder.server(newServer(i, "localhost", i, i + 1)); }); builder.myid(0); builder.dynamicReconfiguration(dynamicReconfiguration); return builder.build(); } private ZookeeperServerConfig.Server.Builder newServer(int id, String hostName, int electionPort, int quorumPort) { ZookeeperServerConfig.Server.Builder builder = new ZookeeperServerConfig.Server.Builder(); builder.id(id); builder.hostname(hostName); builder.electionPort(electionPort); builder.quorumPort(quorumPort); return builder; } }
class ReconfigurerTest { private File cfgFile; private File idFile; @Rule public TemporaryFolder folder = new TemporaryFolder(); @Before public void setup() throws IOException { cfgFile = folder.newFile(); idFile = folder.newFile("myid"); } @Test private ZookeeperServerConfig createConfigAllowReconfiguring(int numberOfServers) { return createConfig(numberOfServers, true); } private ZookeeperServerConfig createConfig(int numberOfServers) { return createConfig(numberOfServers, false); } private ZookeeperServerConfig createConfig(int numberOfServers, boolean dynamicReconfiguration) { ZookeeperServerConfig.Builder builder = new ZookeeperServerConfig.Builder(); builder.zooKeeperConfigFile(cfgFile.getAbsolutePath()); builder.myidFile(idFile.getAbsolutePath()); IntStream.range(0, numberOfServers).forEach(i -> { builder.server(newServer(i, "localhost", i, i + 1)); }); builder.myid(0); builder.dynamicReconfiguration(dynamicReconfiguration); return builder.build(); } private ZookeeperServerConfig.Server.Builder newServer(int id, String hostName, int electionPort, int quorumPort) { ZookeeperServerConfig.Server.Builder builder = new ZookeeperServerConfig.Server.Builder(); builder.id(id); builder.hostname(hostName); builder.electionPort(electionPort); builder.quorumPort(quorumPort); return builder; } }
Consider adding a test of SAN URI mismatching as well
public void can_exact_match_policy_with_san_uri_pattern() { RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn"); RequiredPeerCredential sanUriRequirement = createRequiredCredential(SAN_URI, "myscheme: PeerAuthorizer authorizer = createPeerAuthorizer(createPolicy(POLICY_1, createRoles(ROLE_1), cnRequirement, sanUriRequirement)); AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", singletonList("foo.irrelevant.san"), singletonList("myscheme: assertAuthorized(result); assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1); assertThat(result.matchedPolicies()).containsOnly(POLICY_1); }
assertThat(result.matchedPolicies()).containsOnly(POLICY_1);
public void can_exact_match_policy_with_san_uri_pattern() { RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn"); RequiredPeerCredential sanUriRequirement = createRequiredCredential(SAN_URI, "myscheme: PeerAuthorizer authorizer = createPeerAuthorizer(createPolicy(POLICY_1, createRoles(ROLE_1), cnRequirement, sanUriRequirement)); AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", singletonList("foo.irrelevant.san"), singletonList("myscheme: assertAuthorized(result); assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1); assertThat(result.matchedPolicies()).containsOnly(POLICY_1); assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.matching.cn", emptyList(), singletonList("myscheme: }
class PeerAuthorizerTest { private static final KeyPair KEY_PAIR = KeyUtils.generateKeypair(KeyAlgorithm.EC); private static final String ROLE_1 = "role-1", ROLE_2 = "role-2", ROLE_3 = "role-3", POLICY_1 = "policy-1", POLICY_2 = "policy-2"; @Test public void certificate_must_match_both_san_and_cn_pattern() { RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn"); RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san"); PeerAuthorizer authorizer = createPeerAuthorizer(createPolicy(POLICY_1, createRoles(ROLE_1), cnRequirement, sanRequirement)); AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", asList("foo.matching.san", "foo.invalid.san"), emptyList())); assertAuthorized(result); assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1); assertThat(result.matchedPolicies()).containsOnly(POLICY_1); assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", singletonList("foo.matching.san"), emptyList()))); assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", asList("foo.matching.san", "foo.invalid.san"),emptyList()))); assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.matching.cn", singletonList("foo.invalid.san"), emptyList()))); } @Test public void can_match_multiple_policies() { RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn"); RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san"); PeerAuthorizer peerAuthorizer = createPeerAuthorizer( createPolicy(POLICY_1, createRoles(ROLE_1, ROLE_2), cnRequirement, sanRequirement), createPolicy(POLICY_2, createRoles(ROLE_2, ROLE_3), cnRequirement, sanRequirement)); AuthorizationResult result = peerAuthorizer .authorizePeer(createCertificate("foo.matching.cn", singletonList("foo.matching.san"), emptyList())); assertAuthorized(result); 
assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1, ROLE_2, ROLE_3); assertThat(result.matchedPolicies()).containsOnly(POLICY_1, POLICY_2); } @Test public void can_match_subset_of_policies() { PeerAuthorizer peerAuthorizer = createPeerAuthorizer( createPolicy(POLICY_1, createRoles(ROLE_1), createRequiredCredential(CN, "*.matching.cn")), createPolicy(POLICY_2, createRoles(ROLE_1, ROLE_2), createRequiredCredential(SAN_DNS, "*.matching.san"))); AuthorizationResult result = peerAuthorizer.authorizePeer(createCertificate("foo.invalid.cn", singletonList("foo.matching.san"), emptyList())); assertAuthorized(result); assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1, ROLE_2); assertThat(result.matchedPolicies()).containsOnly(POLICY_2); } @Test public void must_match_all_cn_and_san_patterns() { RequiredPeerCredential cnSuffixRequirement = createRequiredCredential(CN, "*.*.matching.suffix.cn"); RequiredPeerCredential cnPrefixRequirement = createRequiredCredential(CN, "matching.prefix.*.*.*"); RequiredPeerCredential sanPrefixRequirement = createRequiredCredential(SAN_DNS, "*.*.matching.suffix.san"); RequiredPeerCredential sanSuffixRequirement = createRequiredCredential(SAN_DNS, "matching.prefix.*.*.*"); PeerAuthorizer peerAuthorizer = createPeerAuthorizer( createPolicy(POLICY_1, emptySet(), cnSuffixRequirement, cnPrefixRequirement, sanPrefixRequirement, sanSuffixRequirement)); assertAuthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", singletonList("matching.prefix.matching.suffix.san"), emptyList()))); assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", singletonList("matching.prefix.invalid.suffix.san"), emptyList()))); assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("invalid.prefix.matching.suffix.cn", singletonList("matching.prefix.matching.suffix.san"), emptyList()))); } @Test private static X509Certificate 
createCertificate(String subjectCn, List<String> sanDns, List<String> sanUri) { X509CertificateBuilder builder = X509CertificateBuilder.fromKeypair( KEY_PAIR, new X500Principal("CN=" + subjectCn), Instant.EPOCH, Instant.EPOCH.plus(100000, ChronoUnit.DAYS), SHA256_WITH_ECDSA, BigInteger.ONE); sanDns.forEach(san -> builder.addSubjectAlternativeName(Type.DNS_NAME, san)); sanUri.forEach(san -> builder.addSubjectAlternativeName(Type.UNIFORM_RESOURCE_IDENTIFIER, san)); return builder.build(); } private static RequiredPeerCredential createRequiredCredential(Field field, String pattern) { return RequiredPeerCredential.of(field, pattern); } private static Set<Role> createRoles(String... roleNames) { return Arrays.stream(roleNames).map(Role::new).collect(toSet()); } private static PeerAuthorizer createPeerAuthorizer(PeerPolicy... policies) { return new PeerAuthorizer(new AuthorizedPeers(Arrays.stream(policies).collect(toSet()))); } private static PeerPolicy createPolicy(String name, Set<Role> roles, RequiredPeerCredential... requiredCredentials) { return new PeerPolicy(name, roles, asList(requiredCredentials)); } private static void assertAuthorized(AuthorizationResult result) { assertTrue(result.succeeded()); } private static void assertUnauthorized(AuthorizationResult result) { assertFalse(result.succeeded()); } }
class PeerAuthorizerTest { private static final KeyPair KEY_PAIR = KeyUtils.generateKeypair(KeyAlgorithm.EC); private static final String ROLE_1 = "role-1", ROLE_2 = "role-2", ROLE_3 = "role-3", POLICY_1 = "policy-1", POLICY_2 = "policy-2"; @Test public void certificate_must_match_both_san_and_cn_pattern() { RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn"); RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san"); PeerAuthorizer authorizer = createPeerAuthorizer(createPolicy(POLICY_1, createRoles(ROLE_1), cnRequirement, sanRequirement)); AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", asList("foo.matching.san", "foo.invalid.san"), emptyList())); assertAuthorized(result); assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1); assertThat(result.matchedPolicies()).containsOnly(POLICY_1); assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", singletonList("foo.matching.san"), emptyList()))); assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", asList("foo.matching.san", "foo.invalid.san"),emptyList()))); assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.matching.cn", singletonList("foo.invalid.san"), emptyList()))); } @Test public void can_match_multiple_policies() { RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn"); RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san"); PeerAuthorizer peerAuthorizer = createPeerAuthorizer( createPolicy(POLICY_1, createRoles(ROLE_1, ROLE_2), cnRequirement, sanRequirement), createPolicy(POLICY_2, createRoles(ROLE_2, ROLE_3), cnRequirement, sanRequirement)); AuthorizationResult result = peerAuthorizer .authorizePeer(createCertificate("foo.matching.cn", singletonList("foo.matching.san"), emptyList())); assertAuthorized(result); 
assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1, ROLE_2, ROLE_3); assertThat(result.matchedPolicies()).containsOnly(POLICY_1, POLICY_2); } @Test public void can_match_subset_of_policies() { PeerAuthorizer peerAuthorizer = createPeerAuthorizer( createPolicy(POLICY_1, createRoles(ROLE_1), createRequiredCredential(CN, "*.matching.cn")), createPolicy(POLICY_2, createRoles(ROLE_1, ROLE_2), createRequiredCredential(SAN_DNS, "*.matching.san"))); AuthorizationResult result = peerAuthorizer.authorizePeer(createCertificate("foo.invalid.cn", singletonList("foo.matching.san"), emptyList())); assertAuthorized(result); assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1, ROLE_2); assertThat(result.matchedPolicies()).containsOnly(POLICY_2); } @Test public void must_match_all_cn_and_san_patterns() { RequiredPeerCredential cnSuffixRequirement = createRequiredCredential(CN, "*.*.matching.suffix.cn"); RequiredPeerCredential cnPrefixRequirement = createRequiredCredential(CN, "matching.prefix.*.*.*"); RequiredPeerCredential sanPrefixRequirement = createRequiredCredential(SAN_DNS, "*.*.matching.suffix.san"); RequiredPeerCredential sanSuffixRequirement = createRequiredCredential(SAN_DNS, "matching.prefix.*.*.*"); PeerAuthorizer peerAuthorizer = createPeerAuthorizer( createPolicy(POLICY_1, emptySet(), cnSuffixRequirement, cnPrefixRequirement, sanPrefixRequirement, sanSuffixRequirement)); assertAuthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", singletonList("matching.prefix.matching.suffix.san"), emptyList()))); assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", singletonList("matching.prefix.invalid.suffix.san"), emptyList()))); assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("invalid.prefix.matching.suffix.cn", singletonList("matching.prefix.matching.suffix.san"), emptyList()))); } @Test private static X509Certificate 
createCertificate(String subjectCn, List<String> sanDns, List<String> sanUri) { X509CertificateBuilder builder = X509CertificateBuilder.fromKeypair( KEY_PAIR, new X500Principal("CN=" + subjectCn), Instant.EPOCH, Instant.EPOCH.plus(100000, ChronoUnit.DAYS), SHA256_WITH_ECDSA, BigInteger.ONE); sanDns.forEach(san -> builder.addSubjectAlternativeName(Type.DNS_NAME, san)); sanUri.forEach(san -> builder.addSubjectAlternativeName(Type.UNIFORM_RESOURCE_IDENTIFIER, san)); return builder.build(); } private static RequiredPeerCredential createRequiredCredential(Field field, String pattern) { return RequiredPeerCredential.of(field, pattern); } private static Set<Role> createRoles(String... roleNames) { return Arrays.stream(roleNames).map(Role::new).collect(toSet()); } private static PeerAuthorizer createPeerAuthorizer(PeerPolicy... policies) { return new PeerAuthorizer(new AuthorizedPeers(Arrays.stream(policies).collect(toSet()))); } private static PeerPolicy createPolicy(String name, Set<Role> roles, RequiredPeerCredential... requiredCredentials) { return new PeerPolicy(name, roles, asList(requiredCredentials)); } private static void assertAuthorized(AuthorizationResult result) { assertTrue(result.succeeded()); } private static void assertUnauthorized(AuthorizationResult result) { assertFalse(result.succeeded()); } }
Wrong package and bundle?
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) { Element zkElement = XML.getChild(spec, "zookeeper"); if (zkElement == null) return; Element nodesElement = XML.getChild(spec, "nodes"); boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of"); if (isCombined) { throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper"); } int nodeCount = cluster.getContainers().size(); if (nodeCount < MIN_ZOOKEEPER_NODE_COUNT || nodeCount > MAX_ZOOKEEPER_NODE_COUNT || nodeCount % 2 == 0) { throw new IllegalArgumentException("Clusters running ZooKeeper must have an odd number of nodes, between " + MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT); } cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade"); cluster.addSimpleComponent("com.yahoo.vespa.curator.ReconfigurableVespaZooKeeperServer", null, "zkfacade"); cluster.addSimpleComponent("com.yahoo.vespa.curator.Reconfigurer", null, "zkfacade"); }
cluster.addSimpleComponent("com.yahoo.vespa.curator.ReconfigurableVespaZooKeeperServer", null, "zkfacade");
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) { Element zkElement = XML.getChild(spec, "zookeeper"); if (zkElement == null) return; Element nodesElement = XML.getChild(spec, "nodes"); boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of"); if (isCombined) { throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper"); } int nodeCount = cluster.getContainers().size(); if (nodeCount < MIN_ZOOKEEPER_NODE_COUNT || nodeCount > MAX_ZOOKEEPER_NODE_COUNT || nodeCount % 2 == 0) { throw new IllegalArgumentException("Clusters running ZooKeeper must have an odd number of nodes, between " + MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT); } cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade"); cluster.addSimpleComponent("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", null, "zookeeper-server"); cluster.addSimpleComponent("com.yahoo.vespa.zookeeper.Reconfigurer", null, "zookeeper-server"); }
// Builds the 'container'/'jdisc' config model from services.xml: constants, fields,
// constructor, and the doBuild entry point follow, then one helper method per
// sub-element of the container spec.
// NOTE(review): this copy of the class has lost its original line breaks; code is
// preserved byte-for-byte with review comments inserted only between the surviving
// physical lines.
class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html"); private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE"; private static final String CONTAINER_TAG = "container"; private static final String DEPRECATED_CONTAINER_TAG = "jdisc"; private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables"; private static final int MIN_ZOOKEEPER_NODE_COUNT = 3; private static final int MAX_ZOOKEEPER_NODE_COUNT = 7; public enum Networking { disable, enable } private ApplicationPackage app; private final boolean standaloneBuilder; private final Networking networking; private final boolean rpcServerEnabled; private final boolean httpServerEnabled; protected DeployLogger log; public static final List<ConfigModelId> configModelIds = ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG)); private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName(); private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName(); public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) { super(ContainerModel.class); this.standaloneBuilder = standaloneBuilder; this.networking = networking; this.rpcServerEnabled = !standaloneBuilder; this.httpServerEnabled = networking == Networking.enable; } @Override public List<ConfigModelId> handlesElements() { return configModelIds; } @Override public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) { log = modelContext.getDeployLogger(); app = modelContext.getApplicationPackage(); checkVersion(spec); checkTagName(spec, log); ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext); addClusterContent(cluster, spec, modelContext); cluster.setMessageBusEnabled(rpcServerEnabled); 
// doBuild continues: enables rpc/http servers on the cluster and registers it on the
// model; then cluster creation via an anonymous DomConfigProducerBuilder, and
// addClusterContent, which wires every sub-element in a fixed order.
cluster.setRpcServerEnabled(rpcServerEnabled); cluster.setHttpServerEnabled(httpServerEnabled); model.setCluster(cluster); } private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) { return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() { @Override protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(), modelContext.getProducerId(), deployState); } }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec); } private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { DeployState deployState = context.getDeployState(); DocumentFactoryBuilder.buildDocumentFactories(cluster, spec); addConfiguredComponents(deployState, cluster, spec); addSecretStore(cluster, spec); addRestApis(deployState, spec, cluster); addServlets(deployState, spec, cluster); addModelEvaluation(spec, cluster, context); addProcessing(deployState, spec, cluster); addSearch(deployState, spec, cluster); addDocproc(deployState, spec, cluster); addDocumentApi(spec, cluster); cluster.addDefaultHandlersExceptStatus(); addStatusHandlers(cluster, context.getDeployState().isHosted()); addUserHandlers(deployState, cluster, spec); addHttp(deployState, spec, cluster, context); addAccessLogs(deployState, cluster, spec); addRoutingAliases(cluster, spec, deployState.zone().environment()); addNodes(cluster, spec, context); addClientProviders(deployState, spec, cluster); addServerProviders(deployState, spec, cluster); addAthensCopperArgos(cluster, context); addZooKeeper(cluster, spec); } private void addSecretStore(ApplicationContainerCluster cluster, Element spec) { Element secretStoreElement = XML.getChild(spec, "secret-store"); if (secretStoreElement != null) { SecretStore secretStore = new SecretStore(); for (Element 
// addSecretStore continues: collects <group> children into the SecretStore; then the
// hosted-only Athenz identity/rotation helpers (addAthensCopperArgos reads the
// deployment spec, addRotationProperties/setRotations set per-container props).
group : XML.getChildren(secretStoreElement, "group")) { secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment")); } cluster.setSecretStore(secretStore); } } private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) { if ( ! context.getDeployState().isHosted()) return; app.getDeployment().map(DeploymentSpec::fromXml) .ifPresent(deploymentSpec -> { addIdentityProvider(cluster, context.getDeployState().getProperties().configServerSpecs(), context.getDeployState().getProperties().loadBalancerName(), context.getDeployState().getProperties().ztsUrl(), context.getDeployState().getProperties().athenzDnsSuffix(), context.getDeployState().zone(), deploymentSpec); addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec); }); } private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) { cluster.getContainers().forEach(container -> { setRotations(container, endpoints, cluster.getName()); container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec))); }); } private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) { Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance()); if (instance.isEmpty()) return false; return instance.get().zones().stream() .anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) && declaredZone.active()); } private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) { var rotationsProperty = endpoints.stream() .filter(endpoint -> endpoint.clusterId().equals(containerClusterName)) .flatMap(endpoint -> endpoint.names().stream()) .collect(Collectors.toUnmodifiableSet()); container.setProp("rotations", String.join(",", rotationsProperty)); } private void 
// Routing aliases (prod only), configured <components>/<component>, status handlers
// (file-backed on hosted, VIP handler otherwise), client/server providers, access logs.
addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) { if (environment != Environment.prod) return; Element aliases = XML.getChild(spec, "aliases"); for (Element alias : XML.getChildren(aliases, "service-alias")) { cluster.serviceAliases().add(XML.getValue(alias)); } for (Element alias : XML.getChildren(aliases, "endpoint-alias")) { cluster.endpointAliases().add(XML.getValue(alias)); } } private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { for (Element components : XML.getChildren(spec, "components")) { addIncludes(components); addConfiguredComponents(deployState, cluster, components, "component"); } addConfiguredComponents(deployState, cluster, spec, "component"); } protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) { if (isHostedVespa) { String name = "status.html"; Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING)); cluster.addComponent( new FileStatusHandlerComponent( name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE), SystemBindingPattern.fromHttpPath("/" + name))); } else { cluster.addVipHandler(); } } private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element clientSpec: XML.getChildren(spec, "client")) { cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientSpec)); } } private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { addConfiguredComponents(deployState, cluster, spec, "server"); } private void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { List<Element> accessLogElements = getAccessLogElements(spec); for (Element accessLog : accessLogElements) { AccessLogBuilder.buildIfNotDisabled(deployState, cluster, 
// addHttp: applies an explicit <http> element, then for hosted tenant applications
// adds the implicit http server, implicit access control, and the additional hosted
// TLS connector.
// NOTE(review): a RuntimeException message below ("... - see: https:") is truncated
// in this copy — the rest of the URL and the closing quote were lost, leaving an
// unterminated string literal. Restore the full message from the upstream source.
accessLog).ifPresent(cluster::addComponent); } if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) cluster.addDefaultSearchAccessLog(); } private List<Element> getAccessLogElements(Element spec) { return XML.getChildren(spec, "accesslog"); } private void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element httpElement = XML.getChild(spec, "http"); if (httpElement != null) { cluster.setHttp(buildHttp(deployState, cluster, httpElement)); } if (isHostedTenantApplication(context)) { addHostedImplicitHttpIfNotPresent(cluster); addHostedImplicitAccessControlIfNotPresent(deployState, cluster); addDefaultConnectorHostedFilterBinding(cluster); addAdditionalHostedConnector(deployState, cluster, context); } } private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) { cluster.getHttp().getAccessControl() .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp())); ; } private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) { JettyHttpServer server = cluster.getHttp().getHttpServer().get(); String serverName = server.getComponentId().getName(); HostedSslConnectorFactory connectorFactory; if (deployState.endpointCertificateSecrets().isPresent()) { boolean authorizeClient = deployState.zone().system().isPublic(); if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) { throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https: } EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get(); boolean enforceHandshakeClientAuth = context.properties().useAccessControlTlsHandshakeClientAuth() && cluster.getHttp().getAccessControl() .map(accessControl -> accessControl.clientAuthentication) .map(clientAuth -> 
// Hosted TLS connector selection completes (provided vs default certificate), then
// isHostedTenantApplication and the implicit http-server / access-control helpers.
clientAuth.equals(AccessControl.ClientAuthentication.need)) .orElse(false); connectorFactory = authorizeClient ? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get()) : HostedSslConnectorFactory.withProvidedCertificate(serverName, endpointCertificateSecrets, enforceHandshakeClientAuth); } else { connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName); } cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory)); server.addConnector(connectorFactory); } private static boolean isHostedTenantApplication(ConfigModelContext context) { var deployState = context.getDeployState(); boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester(); return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication; } private static void addHostedImplicitHttpIfNotPresent(ApplicationContainerCluster cluster) { if(cluster.getHttp() == null) { cluster.setHttp(new Http(new FilterChains(cluster))); } JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null); if (httpServer == null) { httpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"), cluster, cluster.isHostedVespa()); cluster.getHttp().setHttpServer(httpServer); } int defaultPort = Defaults.getDefaults().vespaWebServicePort(); boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort); if (!defaultConnectorPresent) { httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build()); } } private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) { Http http = cluster.getHttp(); if (http.getAccessControl().isPresent()) return; AthenzDomain tenantDomain = 
// Implicit access control completes (read/write disabled, client auth required);
// then buildHttp and the rest-api / servlet / document-api / docproc / search wiring.
deployState.getProperties().athenzDomain().orElse(null); if (tenantDomain == null) return; new AccessControl.Builder(tenantDomain.value()) .setHandlers(cluster) .readEnabled(false) .writeEnabled(false) .clientAuthentication(AccessControl.ClientAuthentication.need) .build() .configureHttpFilterChains(http); } private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) { Http http = new HttpBuilder().build(deployState, cluster, httpElement); if (networking == Networking.disable) http.removeAllServers(); return http; } private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element restApiElem : XML.getChildren(spec, "rest-api")) { cluster.addRestApi( new RestApiBuilder().build(deployState, cluster, restApiElem)); } } private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element servletElem : XML.getChildren(spec, "servlet")) cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem)); } private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) { ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec); if (containerDocumentApi == null) return; cluster.setDocumentApi(containerDocumentApi); } private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec); if (containerDocproc == null) return; cluster.setDocproc(containerDocproc); ContainerDocproc.Options docprocOptions = containerDocproc.options; cluster.setMbusParams(new ApplicationContainerCluster.MbusParams( docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory)); } private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element searchElement = XML.getChild(spec, "search"); if (searchElement 
// addSearch continues (search chains, handlers, GUI handler, renderer validation);
// then model evaluation, processing chains, and search-chain construction.
== null) return; addIncludes(searchElement); cluster.setSearch(buildSearch(deployState, cluster, searchElement)); addSearchHandler(cluster, searchElement, deployState); addGUIHandler(cluster); validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement); } private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element modelEvaluationElement = XML.getChild(spec, "model-evaluation"); if (modelEvaluationElement == null) return; RankProfileList profiles = context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty; cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles)); } private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element processingElement = XML.getChild(spec, "processing"); if (processingElement == null) return; addIncludes(processingElement); cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement), serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new)); validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement); } private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) { SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec); ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options()); applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch); containerSearch.setQueryProfiles(deployState.getQueryProfiles()); containerSearch.setSemanticRules(deployState.getSemanticRules()); return containerSearch; } private void 
// Page-template configs, user <handler> elements, version/tag checks, node
// allocation entry points (standalone vs XML), and JVM GC-option helpers.
applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) { PageTemplates.validate(applicationPackage); containerSearch.setPageTemplates(PageTemplates.create(applicationPackage)); } private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { for (Element component: XML.getChildren(spec, "handler")) { cluster.addComponent( new DomHandlerBuilder(cluster).build(deployState, cluster, component)); } } private void checkVersion(Element spec) { String version = spec.getAttribute("version"); if ( ! Version.fromString(version).equals(new Version(1))) { throw new RuntimeException("Expected container version to be 1.0, but got " + version); } } private void checkTagName(Element spec, DeployLogger logger) { if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) { logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead."); } } private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { if (standaloneBuilder) addStandaloneNode(cluster); else addNodesFromXml(cluster, spec, context); } private void addStandaloneNode(ApplicationContainerCluster cluster) { ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa()); cluster.addContainers(Collections.singleton(container)); } static boolean incompatibleGCOptions(String jvmargs) { Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC"); Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS"); return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find()); } private static String buildJvmGCOptions(DeployState deployState, String jvmGCOPtions) { String options = (jvmGCOPtions != null) ? jvmGCOPtions : deployState.getProperties().jvmGCOptions(); return (options == null ||options.isEmpty()) ? (deployState.isHosted() ? 
// buildJvmGCOptions completes; getJvmOptions resolves jvm-options vs the deprecated
// jvmargs attribute (rejecting both at once) and warns on GC flags in jvmargs.
ContainerCluster.CMS : ContainerCluster.G1GC) : options; } private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) { String jvmOptions; if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options."); } } else { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); if (incompatibleGCOptions(jvmOptions)) { deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'"); cluster.setJvmGCOptions(ContainerCluster.G1GC); } } return jvmOptions; } private static String extractAttribute(Element element, String attrName) { return element.hasAttribute(attrName) ? 
// extractAttribute completes; helpers extract JVM settings from the legacy <nodes>
// attributes and from the newer <jvm> tag, then the documented addNodesFromXml follows.
element.getAttribute(attrName) : null; } void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger())); if (!cluster.getJvmGCOptions().isPresent()) { String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS); cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); } applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); } void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, Element jvmElement, ConfigModelContext context) { applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS)); applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS); cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); } /** * Add nodes to cluster according to the given containerElement. * * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed * simultaneously for all active config models. 
*/ private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) { Element nodesElement = XML.getChild(containerElement, "nodes"); if (nodesElement == null) { cluster.addContainers(allocateWithoutNodesTag(cluster, context)); } else { List<ApplicationContainer> nodes = createNodes(cluster, nodesElement, context); Element jvmElement = XML.getChild(nodesElement, "jvm"); if (jvmElement == null) { extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context); } else { extractJvmTag(nodes, cluster, jvmElement, context); } applyRoutingAliasProperties(nodes, cluster); applyDefaultPreload(nodes, nodesElement); String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT)); if (environmentVars != null && !environmentVars.isEmpty()) { cluster.setEnvironmentVars(environmentVars); } if (useCpuSocketAffinity(nodesElement)) AbstractService.distributeCpuSocketAffinity(nodes); cluster.addContainers(nodes); } } private static String getEnvironmentVariables(Element environmentVariables) { StringBuilder sb = new StringBuilder(); if (environmentVariables != null) { for (Element var: XML.getChildren(environmentVariables)) { sb.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' '); } } return sb.toString(); } private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { if (nodesElement.hasAttribute("type")) return createNodesFromNodeType(cluster, nodesElement, context); else if (nodesElement.hasAttribute("of")) return createNodesFromContentServiceReference(cluster, nodesElement, context); else if (nodesElement.hasAttribute("count")) return createNodesFromNodeCount(cluster, nodesElement, context); else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed()) return createNodesFromNodeCount(cluster, nodesElement, context); else return 
// createNodes dispatch completes (falls back to an explicit node list); then
// routing-alias props on containers and memory-percentage parsing/validation.
createNodesFromNodeList(context.getDeployState(), cluster, nodesElement); } private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) { if (!cluster.serviceAliases().isEmpty()) { result.forEach(container -> { container.setProp("servicealiases", cluster.serviceAliases().stream().collect(Collectors.joining(","))); }); } if (!cluster.endpointAliases().isEmpty()) { result.forEach(container -> { container.setProp("endpointaliases", cluster.endpointAliases().stream().collect(Collectors.joining(","))); }); } } private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) { if (memoryPercentage == null || memoryPercentage.isEmpty()) return; memoryPercentage = memoryPercentage.trim(); if ( ! memoryPercentage.endsWith("%")) throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster + " must be an integer percentage ending by the '%' sign"); memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim(); try { cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage)); } catch (NumberFormatException e) { throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster + " must be an integer percentage ending by the '%' sign"); } } /** Allocate a container cluster without a nodes tag */ private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) { DeployState deployState = context.getDeployState(); HostSystem hostSystem = cluster.hostSystem(); if (deployState.isHosted()) { ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName())) .vespaVersion(deployState.getWantedNodeVespaVersion()) .dockerImageRepository(deployState.getWantedDockerImageRepo()) .build(); int nodeCount = deployState.zone().environment().isProduction() ? 
// allocateWithoutNodesTag completes (2 nodes in production, else 1; single-host
// fallback when not hosted); then count- and type-based node creation.
2 : 1; Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()), false, !deployState.getProperties().isBootstrap()); var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log); return createNodesFromHosts(log, hosts, cluster); } return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context); } private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) { ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa()); node.setHostResource(host); node.initService(context.getDeployLogger()); return List.of(node); } private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context); Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(), ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()), log); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodeType type = NodeType.valueOf(nodesElement.getAttribute("type")); ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName())) .vespaVersion(context.getDeployState().getWantedNodeVespaVersion()) .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo()) .build(); Map<HostResource, ClusterMembership> hosts = cluster.getRoot().hostSystem().allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(type), log); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> 
// Node creation from a content-service reference ('of' attribute), from resolved
// hosts, and from an explicit list of <node> children.
createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodesSpecification nodeSpecification; try { nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context); } catch (IllegalArgumentException e) { throw new IllegalArgumentException(cluster + " contains an invalid reference", e); } String referenceId = nodesElement.getAttribute("of"); cluster.setHostClusterId(referenceId); Map<HostResource, ClusterMembership> hosts = StorageGroup.provisionHosts(nodeSpecification, referenceId, cluster.getRoot().hostSystem(), context.getDeployLogger()); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) { List<ApplicationContainer> nodes = new ArrayList<>(); for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) { String id = "container." + entry.getValue().index(); ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa()); container.setHostResource(entry.getKey()); container.initService(deployLogger); nodes.add(container); } return nodes; } private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) { List<ApplicationContainer> nodes = new ArrayList<>(); int nodeIndex = 0; for (Element nodeElem: XML.getChildren(nodesElement, "node")) { nodes.add(new ContainerServiceBuilder("container." 
// createNodesFromNodeList completes; cpu-socket affinity flag, jvm-args/preload
// application, and the search/GUI handler registration.
+ nodeIndex, nodeIndex).build(deployState, cluster, nodeElem)); nodeIndex++; } return nodes; } private static boolean useCpuSocketAffinity(Element nodesElement) { if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)) return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)); else return false; } private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) { for (Container container: containers) { if (container.getAssignedJvmOptions().isEmpty()) container.prependJvmOptions(jvmArgs); } } private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) { if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return; for (Container container: containers) container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)); } private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement, DeployState deployState) { cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(), "com.yahoo.search.searchchain.ExecutionFactory")); cluster.addComponent( new SearchHandler( cluster, serverBindings(searchElement, SearchHandler.DEFAULT_BINDING), ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null))); } private void addGUIHandler(ApplicationContainerCluster cluster) { Handler<?> guiHandler = new GUIHandler(); guiHandler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH)); cluster.addComponent(guiHandler); } private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... 
// serverBindings completes (explicit <binding> children override the defaults);
// then document-api/docproc builders and <include> directive handling.
defaultBindings) { List<Element> bindings = XML.getChildren(searchElement, "binding"); if (bindings.isEmpty()) return List.of(defaultBindings); return toBindingList(bindings); } private List<BindingPattern> toBindingList(List<Element> bindingElements) { List<BindingPattern> result = new ArrayList<>(); for (Element element: bindingElements) { String text = element.getTextContent().trim(); if (!text.isEmpty()) result.add(UserBindingPattern.fromPattern(text)); } return result; } private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) { Element documentApiElement = XML.getChild(spec, "document-api"); if (documentApiElement == null) return null; ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement); return new ContainerDocumentApi(cluster, documentApiOptions); } private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { Element docprocElement = XML.getChild(spec, "document-processing"); if (docprocElement == null) return null; addIncludes(docprocElement); DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement); ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement); return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder); } private void addIncludes(Element parentElement) { List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE); if (includes == null || includes.isEmpty()) { return; } if (app == null) { throw new IllegalArgumentException("Element <include> given in XML config, but no application package given."); } for (Element include : includes) { addInclude(parentElement, include); } } private void addInclude(Element parentElement, Element include) { String dirName = include.getAttribute(IncludeDirs.DIR); app.validateIncludeDir(dirName); List<Element> includedFiles = Xml.allElemsFromPath(app, 
// addInclude completes (imports included sub-elements into the parent); then the
// static configured-component helpers and the Athenz identity-provider wiring.
dirName); for (Element includedFile : includedFiles) { List<Element> includedSubElements = XML.getChildren(includedFile); for (Element includedSubElement : includedSubElements) { Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true); parentElement.appendChild(copiedNode); } } } private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster, Element spec, String componentName) { for (Element node : XML.getChildren(spec, componentName)) { cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)); } } private static void validateAndAddConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster, Element spec, String componentName, Consumer<Element> elementValidator) { for (Element node : XML.getChildren(spec, componentName)) { elementValidator.accept(node); cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)); } } private void addIdentityProvider(ApplicationContainerCluster cluster, List<ConfigServerSpec> configServerSpecs, HostName loadBalancerName, URI ztsUrl, String athenzDnsSuffix, Zone zone, DeploymentSpec spec) { spec.athenzDomain() .ifPresent(domain -> { AthenzService service = spec.instance(app.getApplicationId().instance()) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> spec.athenzService()) .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'")); String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." 
// addIdentityProvider completes (registers the component and per-container identity
// props); then load-balancer name resolution, renderer-id validation, and the
// public isContainerTag helper, closing the class.
+ athenzDnsSuffix; IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone); cluster.addComponent(identityProvider); cluster.getContainers().forEach(container -> { container.setProp("identity.domain", domain.value()); container.setProp("identity.service", service.value()); }); }); } private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) { return Optional.ofNullable(loadbalancerName) .orElseGet( () -> HostName.from(configServerSpecs.stream() .findFirst() .map(ConfigServerSpec::getHostName) .orElse("unknown") )); } /** Disallow renderers named "XmlRenderer" or "JsonRenderer" */ private static void validateRendererElement(Element element) { String idAttr = element.getAttribute("id"); if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) { throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr)); } } public static boolean isContainerTag(Element element) { return CONTAINER_TAG.equals(element.getTagName()) || DEPRECATED_CONTAINER_TAG.equals(element.getTagName()); } }
class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html"); private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE"; private static final String CONTAINER_TAG = "container"; private static final String DEPRECATED_CONTAINER_TAG = "jdisc"; private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables"; private static final int MIN_ZOOKEEPER_NODE_COUNT = 3; private static final int MAX_ZOOKEEPER_NODE_COUNT = 7; public enum Networking { disable, enable } private ApplicationPackage app; private final boolean standaloneBuilder; private final Networking networking; private final boolean rpcServerEnabled; private final boolean httpServerEnabled; protected DeployLogger log; public static final List<ConfigModelId> configModelIds = ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG)); private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName(); private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName(); public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) { super(ContainerModel.class); this.standaloneBuilder = standaloneBuilder; this.networking = networking; this.rpcServerEnabled = !standaloneBuilder; this.httpServerEnabled = networking == Networking.enable; } @Override public List<ConfigModelId> handlesElements() { return configModelIds; } @Override public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) { log = modelContext.getDeployLogger(); app = modelContext.getApplicationPackage(); checkVersion(spec); checkTagName(spec, log); ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext); addClusterContent(cluster, spec, modelContext); cluster.setMessageBusEnabled(rpcServerEnabled); 
cluster.setRpcServerEnabled(rpcServerEnabled); cluster.setHttpServerEnabled(httpServerEnabled); model.setCluster(cluster); } private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) { return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() { @Override protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(), modelContext.getProducerId(), deployState); } }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec); } private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { DeployState deployState = context.getDeployState(); DocumentFactoryBuilder.buildDocumentFactories(cluster, spec); addConfiguredComponents(deployState, cluster, spec); addSecretStore(cluster, spec); addRestApis(deployState, spec, cluster); addServlets(deployState, spec, cluster); addModelEvaluation(spec, cluster, context); addProcessing(deployState, spec, cluster); addSearch(deployState, spec, cluster); addDocproc(deployState, spec, cluster); addDocumentApi(spec, cluster); cluster.addDefaultHandlersExceptStatus(); addStatusHandlers(cluster, context.getDeployState().isHosted()); addUserHandlers(deployState, cluster, spec); addHttp(deployState, spec, cluster, context); addAccessLogs(deployState, cluster, spec); addRoutingAliases(cluster, spec, deployState.zone().environment()); addNodes(cluster, spec, context); addClientProviders(deployState, spec, cluster); addServerProviders(deployState, spec, cluster); addAthensCopperArgos(cluster, context); addZooKeeper(cluster, spec); } private void addSecretStore(ApplicationContainerCluster cluster, Element spec) { Element secretStoreElement = XML.getChild(spec, "secret-store"); if (secretStoreElement != null) { SecretStore secretStore = new SecretStore(); for (Element 
group : XML.getChildren(secretStoreElement, "group")) { secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment")); } cluster.setSecretStore(secretStore); } } private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) { if ( ! context.getDeployState().isHosted()) return; app.getDeployment().map(DeploymentSpec::fromXml) .ifPresent(deploymentSpec -> { addIdentityProvider(cluster, context.getDeployState().getProperties().configServerSpecs(), context.getDeployState().getProperties().loadBalancerName(), context.getDeployState().getProperties().ztsUrl(), context.getDeployState().getProperties().athenzDnsSuffix(), context.getDeployState().zone(), deploymentSpec); addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec); }); } private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) { cluster.getContainers().forEach(container -> { setRotations(container, endpoints, cluster.getName()); container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec))); }); } private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) { Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance()); if (instance.isEmpty()) return false; return instance.get().zones().stream() .anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) && declaredZone.active()); } private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) { var rotationsProperty = endpoints.stream() .filter(endpoint -> endpoint.clusterId().equals(containerClusterName)) .flatMap(endpoint -> endpoint.names().stream()) .collect(Collectors.toUnmodifiableSet()); container.setProp("rotations", String.join(",", rotationsProperty)); } private void 
addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) { if (environment != Environment.prod) return; Element aliases = XML.getChild(spec, "aliases"); for (Element alias : XML.getChildren(aliases, "service-alias")) { cluster.serviceAliases().add(XML.getValue(alias)); } for (Element alias : XML.getChildren(aliases, "endpoint-alias")) { cluster.endpointAliases().add(XML.getValue(alias)); } } private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { for (Element components : XML.getChildren(spec, "components")) { addIncludes(components); addConfiguredComponents(deployState, cluster, components, "component"); } addConfiguredComponents(deployState, cluster, spec, "component"); } protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) { if (isHostedVespa) { String name = "status.html"; Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING)); cluster.addComponent( new FileStatusHandlerComponent( name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE), SystemBindingPattern.fromHttpPath("/" + name))); } else { cluster.addVipHandler(); } } private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element clientSpec: XML.getChildren(spec, "client")) { cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientSpec)); } } private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { addConfiguredComponents(deployState, cluster, spec, "server"); } private void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { List<Element> accessLogElements = getAccessLogElements(spec); for (Element accessLog : accessLogElements) { AccessLogBuilder.buildIfNotDisabled(deployState, cluster, 
accessLog).ifPresent(cluster::addComponent); } if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) cluster.addDefaultSearchAccessLog(); } private List<Element> getAccessLogElements(Element spec) { return XML.getChildren(spec, "accesslog"); } private void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element httpElement = XML.getChild(spec, "http"); if (httpElement != null) { cluster.setHttp(buildHttp(deployState, cluster, httpElement)); } if (isHostedTenantApplication(context)) { addHostedImplicitHttpIfNotPresent(cluster); addHostedImplicitAccessControlIfNotPresent(deployState, cluster); addDefaultConnectorHostedFilterBinding(cluster); addAdditionalHostedConnector(deployState, cluster, context); } } private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) { cluster.getHttp().getAccessControl() .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp())); ; } private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) { JettyHttpServer server = cluster.getHttp().getHttpServer().get(); String serverName = server.getComponentId().getName(); HostedSslConnectorFactory connectorFactory; if (deployState.endpointCertificateSecrets().isPresent()) { boolean authorizeClient = deployState.zone().system().isPublic(); if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) { throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https: } EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get(); boolean enforceHandshakeClientAuth = context.properties().useAccessControlTlsHandshakeClientAuth() && cluster.getHttp().getAccessControl() .map(accessControl -> accessControl.clientAuthentication) .map(clientAuth -> 
clientAuth.equals(AccessControl.ClientAuthentication.need)) .orElse(false); connectorFactory = authorizeClient ? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get()) : HostedSslConnectorFactory.withProvidedCertificate(serverName, endpointCertificateSecrets, enforceHandshakeClientAuth); } else { connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName); } cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory)); server.addConnector(connectorFactory); } private static boolean isHostedTenantApplication(ConfigModelContext context) { var deployState = context.getDeployState(); boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester(); return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication; } private static void addHostedImplicitHttpIfNotPresent(ApplicationContainerCluster cluster) { if(cluster.getHttp() == null) { cluster.setHttp(new Http(new FilterChains(cluster))); } JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null); if (httpServer == null) { httpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"), cluster, cluster.isHostedVespa()); cluster.getHttp().setHttpServer(httpServer); } int defaultPort = Defaults.getDefaults().vespaWebServicePort(); boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort); if (!defaultConnectorPresent) { httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build()); } } private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) { Http http = cluster.getHttp(); if (http.getAccessControl().isPresent()) return; AthenzDomain tenantDomain = 
deployState.getProperties().athenzDomain().orElse(null); if (tenantDomain == null) return; new AccessControl.Builder(tenantDomain.value()) .setHandlers(cluster) .readEnabled(false) .writeEnabled(false) .clientAuthentication(AccessControl.ClientAuthentication.need) .build() .configureHttpFilterChains(http); } private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) { Http http = new HttpBuilder().build(deployState, cluster, httpElement); if (networking == Networking.disable) http.removeAllServers(); return http; } private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element restApiElem : XML.getChildren(spec, "rest-api")) { cluster.addRestApi( new RestApiBuilder().build(deployState, cluster, restApiElem)); } } private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element servletElem : XML.getChildren(spec, "servlet")) cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem)); } private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) { ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec); if (containerDocumentApi == null) return; cluster.setDocumentApi(containerDocumentApi); } private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec); if (containerDocproc == null) return; cluster.setDocproc(containerDocproc); ContainerDocproc.Options docprocOptions = containerDocproc.options; cluster.setMbusParams(new ApplicationContainerCluster.MbusParams( docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory)); } private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element searchElement = XML.getChild(spec, "search"); if (searchElement 
== null) return; addIncludes(searchElement); cluster.setSearch(buildSearch(deployState, cluster, searchElement)); addSearchHandler(cluster, searchElement, deployState); addGUIHandler(cluster); validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement); } private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element modelEvaluationElement = XML.getChild(spec, "model-evaluation"); if (modelEvaluationElement == null) return; RankProfileList profiles = context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty; cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles)); } private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element processingElement = XML.getChild(spec, "processing"); if (processingElement == null) return; addIncludes(processingElement); cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement), serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new)); validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement); } private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) { SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec); ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options()); applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch); containerSearch.setQueryProfiles(deployState.getQueryProfiles()); containerSearch.setSemanticRules(deployState.getSemanticRules()); return containerSearch; } private void 
applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) { PageTemplates.validate(applicationPackage); containerSearch.setPageTemplates(PageTemplates.create(applicationPackage)); } private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { for (Element component: XML.getChildren(spec, "handler")) { cluster.addComponent( new DomHandlerBuilder(cluster).build(deployState, cluster, component)); } } private void checkVersion(Element spec) { String version = spec.getAttribute("version"); if ( ! Version.fromString(version).equals(new Version(1))) { throw new RuntimeException("Expected container version to be 1.0, but got " + version); } } private void checkTagName(Element spec, DeployLogger logger) { if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) { logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead."); } } private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { if (standaloneBuilder) addStandaloneNode(cluster); else addNodesFromXml(cluster, spec, context); } private void addStandaloneNode(ApplicationContainerCluster cluster) { ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa()); cluster.addContainers(Collections.singleton(container)); } static boolean incompatibleGCOptions(String jvmargs) { Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC"); Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS"); return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find()); } private static String buildJvmGCOptions(DeployState deployState, String jvmGCOPtions) { String options = (jvmGCOPtions != null) ? jvmGCOPtions : deployState.getProperties().jvmGCOptions(); return (options == null ||options.isEmpty()) ? (deployState.isHosted() ? 
ContainerCluster.CMS : ContainerCluster.G1GC) : options; } private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) { String jvmOptions; if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options."); } } else { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); if (incompatibleGCOptions(jvmOptions)) { deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'"); cluster.setJvmGCOptions(ContainerCluster.G1GC); } } return jvmOptions; } private static String extractAttribute(Element element, String attrName) { return element.hasAttribute(attrName) ? 
element.getAttribute(attrName) : null; } void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger())); if (!cluster.getJvmGCOptions().isPresent()) { String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS); cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); } applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); } void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, Element jvmElement, ConfigModelContext context) { applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS)); applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS); cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); } /** * Add nodes to cluster according to the given containerElement. * * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed * simultaneously for all active config models. 
*/ private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) { Element nodesElement = XML.getChild(containerElement, "nodes"); if (nodesElement == null) { cluster.addContainers(allocateWithoutNodesTag(cluster, context)); } else { List<ApplicationContainer> nodes = createNodes(cluster, nodesElement, context); Element jvmElement = XML.getChild(nodesElement, "jvm"); if (jvmElement == null) { extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context); } else { extractJvmTag(nodes, cluster, jvmElement, context); } applyRoutingAliasProperties(nodes, cluster); applyDefaultPreload(nodes, nodesElement); String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT)); if (environmentVars != null && !environmentVars.isEmpty()) { cluster.setEnvironmentVars(environmentVars); } if (useCpuSocketAffinity(nodesElement)) AbstractService.distributeCpuSocketAffinity(nodes); cluster.addContainers(nodes); } } private static String getEnvironmentVariables(Element environmentVariables) { StringBuilder sb = new StringBuilder(); if (environmentVariables != null) { for (Element var: XML.getChildren(environmentVariables)) { sb.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' '); } } return sb.toString(); } private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { if (nodesElement.hasAttribute("type")) return createNodesFromNodeType(cluster, nodesElement, context); else if (nodesElement.hasAttribute("of")) return createNodesFromContentServiceReference(cluster, nodesElement, context); else if (nodesElement.hasAttribute("count")) return createNodesFromNodeCount(cluster, nodesElement, context); else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed()) return createNodesFromNodeCount(cluster, nodesElement, context); else return 
createNodesFromNodeList(context.getDeployState(), cluster, nodesElement); } private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) { if (!cluster.serviceAliases().isEmpty()) { result.forEach(container -> { container.setProp("servicealiases", cluster.serviceAliases().stream().collect(Collectors.joining(","))); }); } if (!cluster.endpointAliases().isEmpty()) { result.forEach(container -> { container.setProp("endpointaliases", cluster.endpointAliases().stream().collect(Collectors.joining(","))); }); } } private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) { if (memoryPercentage == null || memoryPercentage.isEmpty()) return; memoryPercentage = memoryPercentage.trim(); if ( ! memoryPercentage.endsWith("%")) throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster + " must be an integer percentage ending by the '%' sign"); memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim(); try { cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage)); } catch (NumberFormatException e) { throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster + " must be an integer percentage ending by the '%' sign"); } } /** Allocate a container cluster without a nodes tag */ private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) { DeployState deployState = context.getDeployState(); HostSystem hostSystem = cluster.hostSystem(); if (deployState.isHosted()) { ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName())) .vespaVersion(deployState.getWantedNodeVespaVersion()) .dockerImageRepository(deployState.getWantedDockerImageRepo()) .build(); int nodeCount = deployState.zone().environment().isProduction() ? 
2 : 1; Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()), false, !deployState.getProperties().isBootstrap()); var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log); return createNodesFromHosts(log, hosts, cluster); } return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context); } private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) { ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa()); node.setHostResource(host); node.initService(context.getDeployLogger()); return List.of(node); } private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context); Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(), ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()), log); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodeType type = NodeType.valueOf(nodesElement.getAttribute("type")); ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName())) .vespaVersion(context.getDeployState().getWantedNodeVespaVersion()) .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo()) .build(); Map<HostResource, ClusterMembership> hosts = cluster.getRoot().hostSystem().allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(type), log); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> 
createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodesSpecification nodeSpecification; try { nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context); } catch (IllegalArgumentException e) { throw new IllegalArgumentException(cluster + " contains an invalid reference", e); } String referenceId = nodesElement.getAttribute("of"); cluster.setHostClusterId(referenceId); Map<HostResource, ClusterMembership> hosts = StorageGroup.provisionHosts(nodeSpecification, referenceId, cluster.getRoot().hostSystem(), context.getDeployLogger()); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) { List<ApplicationContainer> nodes = new ArrayList<>(); for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) { String id = "container." + entry.getValue().index(); ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa()); container.setHostResource(entry.getKey()); container.initService(deployLogger); nodes.add(container); } return nodes; } private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) { List<ApplicationContainer> nodes = new ArrayList<>(); int nodeIndex = 0; for (Element nodeElem: XML.getChildren(nodesElement, "node")) { nodes.add(new ContainerServiceBuilder("container." 
+ nodeIndex, nodeIndex).build(deployState, cluster, nodeElem)); nodeIndex++; } return nodes; } private static boolean useCpuSocketAffinity(Element nodesElement) { if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)) return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)); else return false; } private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) { for (Container container: containers) { if (container.getAssignedJvmOptions().isEmpty()) container.prependJvmOptions(jvmArgs); } } private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) { if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return; for (Container container: containers) container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)); } private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement, DeployState deployState) { cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(), "com.yahoo.search.searchchain.ExecutionFactory")); cluster.addComponent( new SearchHandler( cluster, serverBindings(searchElement, SearchHandler.DEFAULT_BINDING), ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null))); } private void addGUIHandler(ApplicationContainerCluster cluster) { Handler<?> guiHandler = new GUIHandler(); guiHandler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH)); cluster.addComponent(guiHandler); } private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... 
defaultBindings) { List<Element> bindings = XML.getChildren(searchElement, "binding"); if (bindings.isEmpty()) return List.of(defaultBindings); return toBindingList(bindings); } private List<BindingPattern> toBindingList(List<Element> bindingElements) { List<BindingPattern> result = new ArrayList<>(); for (Element element: bindingElements) { String text = element.getTextContent().trim(); if (!text.isEmpty()) result.add(UserBindingPattern.fromPattern(text)); } return result; } private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) { Element documentApiElement = XML.getChild(spec, "document-api"); if (documentApiElement == null) return null; ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement); return new ContainerDocumentApi(cluster, documentApiOptions); } private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { Element docprocElement = XML.getChild(spec, "document-processing"); if (docprocElement == null) return null; addIncludes(docprocElement); DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement); ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement); return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder); } private void addIncludes(Element parentElement) { List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE); if (includes == null || includes.isEmpty()) { return; } if (app == null) { throw new IllegalArgumentException("Element <include> given in XML config, but no application package given."); } for (Element include : includes) { addInclude(parentElement, include); } } private void addInclude(Element parentElement, Element include) { String dirName = include.getAttribute(IncludeDirs.DIR); app.validateIncludeDir(dirName); List<Element> includedFiles = Xml.allElemsFromPath(app, 
dirName); for (Element includedFile : includedFiles) { List<Element> includedSubElements = XML.getChildren(includedFile); for (Element includedSubElement : includedSubElements) { Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true); parentElement.appendChild(copiedNode); } } } private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster, Element spec, String componentName) { for (Element node : XML.getChildren(spec, componentName)) { cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)); } } private static void validateAndAddConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster, Element spec, String componentName, Consumer<Element> elementValidator) { for (Element node : XML.getChildren(spec, componentName)) { elementValidator.accept(node); cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)); } } private void addIdentityProvider(ApplicationContainerCluster cluster, List<ConfigServerSpec> configServerSpecs, HostName loadBalancerName, URI ztsUrl, String athenzDnsSuffix, Zone zone, DeploymentSpec spec) { spec.athenzDomain() .ifPresent(domain -> { AthenzService service = spec.instance(app.getApplicationId().instance()) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> spec.athenzService()) .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'")); String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." 
+ athenzDnsSuffix; IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone); cluster.addComponent(identityProvider); cluster.getContainers().forEach(container -> { container.setProp("identity.domain", domain.value()); container.setProp("identity.service", service.value()); }); }); } private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) { return Optional.ofNullable(loadbalancerName) .orElseGet( () -> HostName.from(configServerSpecs.stream() .findFirst() .map(ConfigServerSpec::getHostName) .orElse("unknown") )); } /** Disallow renderers named "XmlRenderer" or "JsonRenderer" */ private static void validateRendererElement(Element element) { String idAttr = element.getAttribute("id"); if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) { throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr)); } } public static boolean isContainerTag(Element element) { return CONTAINER_TAG.equals(element.getTagName()) || DEPRECATED_CONTAINER_TAG.equals(element.getTagName()); } }
Yes, will fix
/**
 * Sets up this container cluster to run an embedded ZooKeeper ensemble when the services spec
 * contains a {@code <zookeeper>} child element; does nothing otherwise.
 *
 * @param cluster the container cluster being built
 * @param spec    the cluster's XML element from services.xml
 * @throws IllegalArgumentException if the cluster is combined with a content cluster
 *         ({@code <nodes of="...">}), or if the container count cannot form a valid ensemble
 *         (must be odd and within [MIN_ZOOKEEPER_NODE_COUNT, MAX_ZOOKEEPER_NODE_COUNT])
 */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
    Element zkElement = XML.getChild(spec, "zookeeper");
    if (zkElement == null) return;

    // A cluster declared with <nodes of="..."> shares hosts with a content cluster,
    // so it cannot also host a ZooKeeper ensemble.
    Element nodesElement = XML.getChild(spec, "nodes");
    boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
    if (isCombined) {
        throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
    }

    // ZooKeeper majority quorum requires an odd ensemble size; the class constants bound it.
    int nodeCount = cluster.getContainers().size();
    if (nodeCount < MIN_ZOOKEEPER_NODE_COUNT || nodeCount > MAX_ZOOKEEPER_NODE_COUNT || nodeCount % 2 == 0) {
        throw new IllegalArgumentException("Clusters running ZooKeeper must have an odd number of nodes, between " +
                                           MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT);
    }

    cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
    // Fix: the reconfigurable ZooKeeper server and its Reconfigurer live in the
    // com.yahoo.vespa.zookeeper package of the zookeeper-server bundle — the original
    // registered them under com.yahoo.vespa.curator in the zkfacade bundle, which would
    // fail component resolution at deploy time.
    cluster.addSimpleComponent("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", null, "zookeeper-server");
    cluster.addSimpleComponent("com.yahoo.vespa.zookeeper.Reconfigurer", null, "zookeeper-server");
}
cluster.addSimpleComponent("com.yahoo.vespa.curator.ReconfigurableVespaZooKeeperServer", null, "zkfacade");
/**
 * Wires an embedded ZooKeeper ensemble into the cluster when services.xml requests one via a
 * {@code <zookeeper>} element. Validates that the cluster is not combined with a content
 * cluster and that its size can form a legal ensemble before registering the components.
 */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
    if (XML.getChild(spec, "zookeeper") == null) return;

    // <nodes of="..."> means hosts are borrowed from a content cluster — not allowed here.
    Element nodes = XML.getChild(spec, "nodes");
    if (nodes != null && nodes.hasAttribute("of")) {
        throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
    }

    // Ensemble size must be odd and within the configured bounds.
    int memberCount = cluster.getContainers().size();
    boolean legalEnsemble = memberCount >= MIN_ZOOKEEPER_NODE_COUNT
                         && memberCount <= MAX_ZOOKEEPER_NODE_COUNT
                         && memberCount % 2 != 0;
    if ( ! legalEnsemble) {
        throw new IllegalArgumentException("Clusters running ZooKeeper must have an odd number of nodes, between " +
                                           MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT);
    }

    cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
    cluster.addSimpleComponent("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", null, "zookeeper-server");
    cluster.addSimpleComponent("com.yahoo.vespa.zookeeper.Reconfigurer", null, "zookeeper-server");
}
/**
 * Builds the container cluster model from a {@code <container>} (or deprecated {@code <jdisc>})
 * element in services.xml: components, handlers, http servers/filters, search/docproc/document-api
 * subsystems, and node allocation.
 *
 * NOTE(review): original source was whitespace-collapsed; code reformatted, tokens unchanged
 * except one string literal whose URL tail was truncated by extraction (flagged below).
 */
class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {

    // Load-balancer status file served by the hosted status handler.
    static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
    // Environment variable that may override HOSTED_VESPA_STATUS_FILE.
    private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";

    private static final String CONTAINER_TAG = "container";
    private static final String DEPRECATED_CONTAINER_TAG = "jdisc";
    private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";

    // Legal ZooKeeper ensemble size bounds (used by addZooKeeper, defined elsewhere in this file).
    private static final int MIN_ZOOKEEPER_NODE_COUNT = 3;
    private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;

    public enum Networking { disable, enable }

    private ApplicationPackage app;
    private final boolean standaloneBuilder;
    private final Networking networking;
    private final boolean rpcServerEnabled;
    private final boolean httpServerEnabled;
    protected DeployLogger log;

    public static final List<ConfigModelId> configModelIds =
            ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG));

    // Renderer ids reserved for the built-in renderers; user renderers may not reuse them.
    private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
    private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();

    public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
        super(ContainerModel.class);
        this.standaloneBuilder = standaloneBuilder;
        this.networking = networking;
        // RPC/message bus is on except in standalone mode; http follows the networking flag.
        this.rpcServerEnabled = !standaloneBuilder;
        this.httpServerEnabled = networking == Networking.enable;
    }

    @Override
    public List<ConfigModelId> handlesElements() { return configModelIds; }

    @Override
    public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
        log = modelContext.getDeployLogger();
        app = modelContext.getApplicationPackage();
        checkVersion(spec);
        checkTagName(spec, log);
        ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
        addClusterContent(cluster, spec, modelContext);
        cluster.setMessageBusEnabled(rpcServerEnabled);
        cluster.setRpcServerEnabled(rpcServerEnabled);
        cluster.setHttpServerEnabled(httpServerEnabled);
        model.setCluster(cluster);
    }

    private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
        return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
            @Override
            protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
                return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(), modelContext.getProducerId(), deployState);
            }
        }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
    }

    /** Populates the cluster from the spec; ordering of these calls matters for later steps. */
    private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
        DeployState deployState = context.getDeployState();
        DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
        addConfiguredComponents(deployState, cluster, spec);
        addSecretStore(cluster, spec);
        addRestApis(deployState, spec, cluster);
        addServlets(deployState, spec, cluster);
        addModelEvaluation(spec, cluster, context);
        addProcessing(deployState, spec, cluster);
        addSearch(deployState, spec, cluster);
        addDocproc(deployState, spec, cluster);
        addDocumentApi(spec, cluster);
        cluster.addDefaultHandlersExceptStatus();
        addStatusHandlers(cluster, context.getDeployState().isHosted());
        addUserHandlers(deployState, cluster, spec);
        addHttp(deployState, spec, cluster, context);
        addAccessLogs(deployState, cluster, spec);
        addRoutingAliases(cluster, spec, deployState.zone().environment());
        addNodes(cluster, spec, context);
        addClientProviders(deployState, spec, cluster);
        addServerProviders(deployState, spec, cluster);
        addAthensCopperArgos(cluster, context);
        addZooKeeper(cluster, spec);
    }

    private void addSecretStore(ApplicationContainerCluster cluster, Element spec) {
        Element secretStoreElement = XML.getChild(spec, "secret-store");
        if (secretStoreElement != null) {
            SecretStore secretStore = new SecretStore();
            for (Element group : XML.getChildren(secretStoreElement, "group")) {
                secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
            }
            cluster.setSecretStore(secretStore);
        }
    }

    /** Hosted-only: wires the Athenz identity provider and rotation properties from deployment.xml. */
    private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) {
        if ( ! context.getDeployState().isHosted()) return;
        app.getDeployment().map(DeploymentSpec::fromXml)
           .ifPresent(deploymentSpec -> {
               addIdentityProvider(cluster,
                                   context.getDeployState().getProperties().configServerSpecs(),
                                   context.getDeployState().getProperties().loadBalancerName(),
                                   context.getDeployState().getProperties().ztsUrl(),
                                   context.getDeployState().getProperties().athenzDnsSuffix(),
                                   context.getDeployState().zone(),
                                   deploymentSpec);
               addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec);
           });
    }

    private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
        cluster.getContainers().forEach(container -> {
            setRotations(container, endpoints, cluster.getName());
            container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
        });
    }

    /** True iff the deployment spec declares this zone for this instance with rotation active. */
    private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
        Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance());
        if (instance.isEmpty()) return false;
        return instance.get().zones().stream()
                       .anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region()))
                                                 && declaredZone.active());
    }

    private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
        // Collect all endpoint names pointing at this cluster into a comma-separated service property.
        var rotationsProperty = endpoints.stream()
                .filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
                .flatMap(endpoint -> endpoint.names().stream())
                .collect(Collectors.toUnmodifiableSet());
        container.setProp("rotations", String.join(",", rotationsProperty));
    }

    /** Routing aliases only apply in prod. */
    private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
        if (environment != Environment.prod) return;
        Element aliases = XML.getChild(spec, "aliases");
        for (Element alias : XML.getChildren(aliases, "service-alias")) {
            cluster.serviceAliases().add(XML.getValue(alias));
        }
        for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
            cluster.endpointAliases().add(XML.getValue(alias));
        }
    }

    private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
        // <component> elements may appear directly under the cluster or inside <components> groups.
        for (Element components : XML.getChildren(spec, "components")) {
            addIncludes(components);
            addConfiguredComponents(deployState, cluster, components, "component");
        }
        addConfiguredComponents(deployState, cluster, spec, "component");
    }

    protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
        if (isHostedVespa) {
            // Serve the load-balancer status file; env var can override the file location.
            String name = "status.html";
            Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
            cluster.addComponent(
                    new FileStatusHandlerComponent(
                            name + "-status-handler",
                            statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
                            SystemBindingPattern.fromHttpPath("/" + name)));
        } else {
            cluster.addVipHandler();
        }
    }

    private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        for (Element clientSpec: XML.getChildren(spec, "client")) {
            cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientSpec));
        }
    }

    private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        addConfiguredComponents(deployState, cluster, spec, "server");
    }

    private void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
        List<Element> accessLogElements = getAccessLogElements(spec);
        for (Element accessLog : accessLogElements) {
            AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
        }
        // Fall back to the default search access log when none is configured (and defaults are on).
        if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
            cluster.addDefaultSearchAccessLog();
    }

    private List<Element> getAccessLogElements(Element spec) {
        return XML.getChildren(spec, "accesslog");
    }

    private void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
        Element httpElement = XML.getChild(spec, "http");
        if (httpElement != null) {
            cluster.setHttp(buildHttp(deployState, cluster, httpElement));
        }
        // Hosted tenant apps always get an implicit http setup plus the TLS connector.
        if (isHostedTenantApplication(context)) {
            addHostedImplicitHttpIfNotPresent(cluster);
            addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
            addDefaultConnectorHostedFilterBinding(cluster);
            addAdditionalHostedConnector(deployState, cluster, context);
        }
    }

    private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
        cluster.getHttp().getAccessControl()
                .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp())); ; // NOTE(review): stray empty statement kept from original
    }

    private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) {
        JettyHttpServer server = cluster.getHttp().getHttpServer().get();
        String serverName = server.getComponentId().getName();

        // Set up the dedicated TLS port with the provided endpoint certificate, or the default
        // certificate/truststore when no endpoint certificate is available.
        HostedSslConnectorFactory connectorFactory;
        if (deployState.endpointCertificateSecrets().isPresent()) {
            boolean authorizeClient = deployState.zone().system().isPublic();
            if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
                // NOTE(review): URL tail of this message was truncated in the collapsed source
                // ("https:" swallowed the rest as a comment); reconstructed — verify against VCS.
                throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https://cloud.vespa.ai/security-model#data-plane");
            }
            EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
            boolean enforceHandshakeClientAuth = context.properties().useAccessControlTlsHandshakeClientAuth() &&
                    cluster.getHttp().getAccessControl()
                            .map(accessControl -> accessControl.clientAuthentication)
                            .map(clientAuth -> clientAuth.equals(AccessControl.ClientAuthentication.need))
                            .orElse(false);
            connectorFactory = authorizeClient
                    ? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get())
                    : HostedSslConnectorFactory.withProvidedCertificate(serverName, endpointCertificateSecrets, enforceHandshakeClientAuth);
        } else {
            connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName);
        }
        cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
        server.addConnector(connectorFactory);
    }

    private static boolean isHostedTenantApplication(ConfigModelContext context) {
        var deployState = context.getDeployState();
        boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester();
        return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication;
    }

    private static void addHostedImplicitHttpIfNotPresent(ApplicationContainerCluster cluster) {
        if(cluster.getHttp() == null) {
            cluster.setHttp(new Http(new FilterChains(cluster)));
        }
        JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null);
        if (httpServer == null) {
            httpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"), cluster, cluster.isHostedVespa());
            cluster.getHttp().setHttpServer(httpServer);
        }
        // Ensure a connector on the default web service port exists.
        int defaultPort = Defaults.getDefaults().vespaWebServicePort();
        boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream()
                .anyMatch(connector -> connector.getListenPort() == defaultPort);
        if (!defaultConnectorPresent) {
            httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
        }
    }

    private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
        Http http = cluster.getHttp();
        if (http.getAccessControl().isPresent()) return; // access control added explicitly
        AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
        if (tenantDomain == null) return; // tenant domain not present, cannot add access control
        new AccessControl.Builder(tenantDomain.value())
                .setHandlers(cluster)
                .readEnabled(false)
                .writeEnabled(false)
                .clientAuthentication(AccessControl.ClientAuthentication.need)
                .build()
                .configureHttpFilterChains(http);
    }

    private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
        Http http = new HttpBuilder().build(deployState, cluster, httpElement);
        if (networking == Networking.disable)
            http.removeAllServers();
        return http;
    }

    private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        for (Element restApiElem : XML.getChildren(spec, "rest-api")) {
            cluster.addRestApi(new RestApiBuilder().build(deployState, cluster, restApiElem));
        }
    }

    private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        for (Element servletElem : XML.getChildren(spec, "servlet"))
            cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem));
    }

    private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
        ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec);
        if (containerDocumentApi == null) return;
        cluster.setDocumentApi(containerDocumentApi);
    }

    private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
        if (containerDocproc == null) return;
        cluster.setDocproc(containerDocproc);
        ContainerDocproc.Options docprocOptions = containerDocproc.options;
        cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
                docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
    }

    private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        Element searchElement = XML.getChild(spec, "search");
        if (searchElement == null) return;
        addIncludes(searchElement);
        cluster.setSearch(buildSearch(deployState, cluster, searchElement));
        addSearchHandler(cluster, searchElement, deployState);
        addGUIHandler(cluster);
        validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
    }

    private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
        Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
        if (modelEvaluationElement == null) return;
        RankProfileList profiles = context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
        cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
    }

    private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        Element processingElement = XML.getChild(spec, "processing");
        if (processingElement == null) return;
        addIncludes(processingElement);
        cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
                                    serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
        validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
    }

    private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
        SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);
        ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
        applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
        containerSearch.setQueryProfiles(deployState.getQueryProfiles());
        containerSearch.setSemanticRules(deployState.getSemanticRules());
        return containerSearch;
    }

    private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
        PageTemplates.validate(applicationPackage);
        containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
    }

    private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
        for (Element component: XML.getChildren(spec, "handler")) {
            cluster.addComponent(new DomHandlerBuilder(cluster).build(deployState, cluster, component));
        }
    }

    /** Only services version 1.0 is supported. */
    private void checkVersion(Element spec) {
        String version = spec.getAttribute("version");
        if ( ! Version.fromString(version).equals(new Version(1))) {
            throw new RuntimeException("Expected container version to be 1.0, but got " + version);
        }
    }

    private void checkTagName(Element spec, DeployLogger logger) {
        if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) {
            logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead.");
        }
    }

    private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
        if (standaloneBuilder)
            addStandaloneNode(cluster);
        else
            addNodesFromXml(cluster, spec, context);
    }

    private void addStandaloneNode(ApplicationContainerCluster cluster) {
        ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa());
        cluster.addContainers(Collections.singleton(container));
    }

    /** True when the jvmargs string contains GC-selection flags that belong in jvm-gc-options. */
    static boolean incompatibleGCOptions(String jvmargs) {
        Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC");
        Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS");
        return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find());
    }

    // NOTE(review): parameter name "jvmGCOPtions" is a typo kept from the original (public surface of this file).
    private static String buildJvmGCOptions(DeployState deployState, String jvmGCOPtions) {
        String options = (jvmGCOPtions != null) ? jvmGCOPtions : deployState.getProperties().jvmGCOptions();
        // Default GC: CMS when hosted, G1 otherwise.
        return (options == null ||options.isEmpty()) ? (deployState.isHosted() ? ContainerCluster.CMS : ContainerCluster.G1GC) : options;
    }

    private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
        String jvmOptions;
        if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
            jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
            // jvm-options and the deprecated jvmargs are mutually exclusive.
            if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
                String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
                throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
                        " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
            }
        } else {
            jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
            if (incompatibleGCOptions(jvmOptions)) {
                deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
                cluster.setJvmGCOptions(ContainerCluster.G1GC);
            }
        }
        return jvmOptions;
    }

    private static String extractAttribute(Element element, String attrName) {
        return element.hasAttribute(attrName) ? element.getAttribute(attrName) : null;
    }

    void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                                      Element nodesElement, ConfigModelContext context) {
        applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
        if (!cluster.getJvmGCOptions().isPresent()) {
            String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
            cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions));
        }
        applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    }

    void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                       Element jvmElement, ConfigModelContext context) {
        applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS));
        applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
        String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
        cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions));
    }

    /**
     * Add nodes to cluster according to the given containerElement.
     *
     * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
     * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
     * simultaneously for all active config models.
     */
    private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
        Element nodesElement = XML.getChild(containerElement, "nodes");
        if (nodesElement == null) {
            cluster.addContainers(allocateWithoutNodesTag(cluster, context));
        } else {
            List<ApplicationContainer> nodes = createNodes(cluster, nodesElement, context);

            // JVM settings come either from a <jvm> child or from legacy attributes on <nodes>.
            Element jvmElement = XML.getChild(nodesElement, "jvm");
            if (jvmElement == null) {
                extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
            } else {
                extractJvmTag(nodes, cluster, jvmElement, context);
            }
            applyRoutingAliasProperties(nodes, cluster);
            applyDefaultPreload(nodes, nodesElement);
            String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
            if (environmentVars != null && !environmentVars.isEmpty()) {
                cluster.setEnvironmentVars(environmentVars);
            }
            if (useCpuSocketAffinity(nodesElement))
                AbstractService.distributeCpuSocketAffinity(nodes);
            cluster.addContainers(nodes);
        }
    }

    /** Serializes the <environment-variables> children to a space-separated "name=value" string. */
    private static String getEnvironmentVariables(Element environmentVariables) {
        StringBuilder sb = new StringBuilder();
        if (environmentVariables != null) {
            for (Element var: XML.getChildren(environmentVariables)) {
                sb.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' ');
            }
        }
        return sb.toString();
    }

    private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
        if (nodesElement.hasAttribute("type")) // internal use for hosted system infrastructure nodes
            return createNodesFromNodeType(cluster, nodesElement, context);
        else if (nodesElement.hasAttribute("of")) // hosted node sharing with a content cluster
            return createNodesFromContentServiceReference(cluster, nodesElement, context);
        else if (nodesElement.hasAttribute("count")) // regular, hosted node count
            return createNodesFromNodeCount(cluster, nodesElement, context);
        else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed()) // default to 1 in manual zones
            return createNodesFromNodeCount(cluster, nodesElement, context);
        else // the non-hosted case
            return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
    }

    private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) {
        if (!cluster.serviceAliases().isEmpty()) {
            result.forEach(container -> {
                container.setProp("servicealiases", cluster.serviceAliases().stream().collect(Collectors.joining(",")));
            });
        }
        if (!cluster.endpointAliases().isEmpty()) {
            result.forEach(container -> {
                container.setProp("endpointaliases", cluster.endpointAliases().stream().collect(Collectors.joining(",")));
            });
        }
    }

    /** Parses e.g. "60%" into cluster.setMemoryPercentage(60); rejects anything else. */
    private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
        if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
        memoryPercentage = memoryPercentage.trim();

        if ( ! memoryPercentage.endsWith("%"))
            throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                               " must be an integer percentage ending by the '%' sign");
        memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();

        try {
            cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                               " must be an integer percentage ending by the '%' sign");
        }
    }

    /** Allocate a container cluster without a nodes tag */
    private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
        DeployState deployState = context.getDeployState();
        HostSystem hostSystem = cluster.hostSystem();
        if (deployState.isHosted()) {
            // Hosted: request 2 nodes in prod, 1 elsewhere.
            ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
                    .vespaVersion(deployState.getWantedNodeVespaVersion())
                    .dockerImageRepository(deployState.getWantedDockerImageRepo())
                    .build();
            int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
            Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()),
                                              false,
                                              !deployState.getProperties().isBootstrap());
            var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log);
            return createNodesFromHosts(log, hosts, cluster);
        }
        // Non-hosted: a single node on the single host.
        return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
    }

    private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
        ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa());
        node.setHostResource(host);
        node.initService(context.getDeployLogger());
        return List.of(node);
    }

    private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
        NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
                                                                                  ClusterSpec.Type.container,
                                                                                  ClusterSpec.Id.from(cluster.getName()),
                                                                                  log);
        return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
    }

    private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
        NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
        ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
                .vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
                .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
                .build();
        Map<HostResource, ClusterMembership> hosts =
                cluster.getRoot().hostSystem().allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(type), log);
        return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
    }

    private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
        NodesSpecification nodeSpecification;
        try {
            nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
        }
        String referenceId = nodesElement.getAttribute("of");
        cluster.setHostClusterId(referenceId);
        Map<HostResource, ClusterMembership> hosts =
                StorageGroup.provisionHosts(nodeSpecification, referenceId, cluster.getRoot().hostSystem(), context.getDeployLogger());
        return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
    }

    private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) {
        List<ApplicationContainer> nodes = new ArrayList<>();
        for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
            String id = "container." + entry.getValue().index();
            ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa());
            container.setHostResource(entry.getKey());
            container.initService(deployLogger);
            nodes.add(container);
        }
        return nodes;
    }

    private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
        List<ApplicationContainer> nodes = new ArrayList<>();
        int nodeIndex = 0;
        for (Element nodeElem: XML.getChildren(nodesElement, "node")) {
            nodes.add(new ContainerServiceBuilder("container." + nodeIndex, nodeIndex).build(deployState, cluster, nodeElem));
            nodeIndex++;
        }
        return nodes;
    }

    private static boolean useCpuSocketAffinity(Element nodesElement) {
        if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME))
            return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
        else
            return false;
    }

    private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
        for (Container container: containers) {
            if (container.getAssignedJvmOptions().isEmpty())
                container.prependJvmOptions(jvmArgs);
        }
    }

    private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
        if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
        for (Container container: containers)
            container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME));
    }

    private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement, DeployState deployState) {
        // Our own handler is needed in addition to the ExecutionFactory component.
        cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(), "com.yahoo.search.searchchain.ExecutionFactory"));
        cluster.addComponent(
                new SearchHandler(
                        cluster,
                        serverBindings(searchElement, SearchHandler.DEFAULT_BINDING),
                        ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null)));
    }

    private void addGUIHandler(ApplicationContainerCluster cluster) {
        Handler<?> guiHandler = new GUIHandler();
        guiHandler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH));
        cluster.addComponent(guiHandler);
    }

    private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... defaultBindings) {
        List<Element> bindings = XML.getChildren(searchElement, "binding");
        if (bindings.isEmpty())
            return List.of(defaultBindings);
        return toBindingList(bindings);
    }

    private List<BindingPattern> toBindingList(List<Element> bindingElements) {
        List<BindingPattern> result = new ArrayList<>();
        for (Element element: bindingElements) {
            String text = element.getTextContent().trim();
            if (!text.isEmpty())
                result.add(UserBindingPattern.fromPattern(text));
        }
        return result;
    }

    private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
        Element documentApiElement = XML.getChild(spec, "document-api");
        if (documentApiElement == null) return null;
        ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
        return new ContainerDocumentApi(cluster, documentApiOptions);
    }

    private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
        Element docprocElement = XML.getChild(spec, "document-processing");
        if (docprocElement == null) return null;
        addIncludes(docprocElement);
        DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
        ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
        return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
    }

    private void addIncludes(Element parentElement) {
        List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
        if (includes == null || includes.isEmpty()) {
            return;
        }
        if (app == null) {
            throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
        }
        for (Element include : includes) {
            addInclude(parentElement, include);
        }
    }

    /** Imports all sub-elements of every file in the included directory into parentElement. */
    private void addInclude(Element parentElement, Element include) {
        String dirName = include.getAttribute(IncludeDirs.DIR);
        app.validateIncludeDir(dirName);
        List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
        for (Element includedFile : includedFiles) {
            List<Element> includedSubElements = XML.getChildren(includedFile);
            for (Element includedSubElement : includedSubElements) {
                Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
                parentElement.appendChild(copiedNode);
            }
        }
    }

    private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                                Element spec, String componentName) {
        for (Element node : XML.getChildren(spec, componentName)) {
            cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
        }
    }

    private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                           ContainerCluster<? extends Container> cluster,
                                                           Element spec, String componentName,
                                                           Consumer<Element> elementValidator) {
        for (Element node : XML.getChildren(spec, componentName)) {
            elementValidator.accept(node); // throws exception here if something is wrong
            cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
        }
    }

    private void addIdentityProvider(ApplicationContainerCluster cluster,
                                     List<ConfigServerSpec> configServerSpecs,
                                     HostName loadBalancerName,
                                     URI ztsUrl,
                                     String athenzDnsSuffix,
                                     Zone zone,
                                     DeploymentSpec spec) {
        spec.athenzDomain()
            .ifPresent(domain -> {
                // Service comes from the instance spec for this zone, falling back to the global one.
                AthenzService service = spec.instance(app.getApplicationId().instance())
                        .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                        .or(() -> spec.athenzService())
                        .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" +
                                                                app.getApplicationId().instance() + "'"));
                String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
                IdentityProvider identityProvider = new IdentityProvider(domain,
                                                                         service,
                                                                         getLoadBalancerName(loadBalancerName, configServerSpecs),
                                                                         ztsUrl,
                                                                         zoneDnsSuffix,
                                                                         zone);
                cluster.addComponent(identityProvider);
                cluster.getContainers().forEach(container -> {
                    container.setProp("identity.domain", domain.value());
                    container.setProp("identity.service", service.value());
                });
            });
    }

    /** Falls back to the first config server's hostname when no load balancer name is set. */
    private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
        return Optional.ofNullable(loadbalancerName)
                .orElseGet(
                        () -> HostName.from(configServerSpecs.stream()
                                .findFirst()
                                .map(ConfigServerSpec::getHostName)
                                .orElse("unknown")));
    }

    /** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
    private static void validateRendererElement(Element element) {
        String idAttr = element.getAttribute("id");
        if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) {
            throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
        }
    }

    public static boolean isContainerTag(Element element) {
        return CONTAINER_TAG.equals(element.getTagName()) || DEPRECATED_CONTAINER_TAG.equals(element.getTagName());
    }
}
class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html"); private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE"; private static final String CONTAINER_TAG = "container"; private static final String DEPRECATED_CONTAINER_TAG = "jdisc"; private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables"; private static final int MIN_ZOOKEEPER_NODE_COUNT = 3; private static final int MAX_ZOOKEEPER_NODE_COUNT = 7; public enum Networking { disable, enable } private ApplicationPackage app; private final boolean standaloneBuilder; private final Networking networking; private final boolean rpcServerEnabled; private final boolean httpServerEnabled; protected DeployLogger log; public static final List<ConfigModelId> configModelIds = ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG)); private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName(); private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName(); public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) { super(ContainerModel.class); this.standaloneBuilder = standaloneBuilder; this.networking = networking; this.rpcServerEnabled = !standaloneBuilder; this.httpServerEnabled = networking == Networking.enable; } @Override public List<ConfigModelId> handlesElements() { return configModelIds; } @Override public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) { log = modelContext.getDeployLogger(); app = modelContext.getApplicationPackage(); checkVersion(spec); checkTagName(spec, log); ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext); addClusterContent(cluster, spec, modelContext); cluster.setMessageBusEnabled(rpcServerEnabled); 
cluster.setRpcServerEnabled(rpcServerEnabled); cluster.setHttpServerEnabled(httpServerEnabled); model.setCluster(cluster); } private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) { return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() { @Override protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(), modelContext.getProducerId(), deployState); } }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec); } private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { DeployState deployState = context.getDeployState(); DocumentFactoryBuilder.buildDocumentFactories(cluster, spec); addConfiguredComponents(deployState, cluster, spec); addSecretStore(cluster, spec); addRestApis(deployState, spec, cluster); addServlets(deployState, spec, cluster); addModelEvaluation(spec, cluster, context); addProcessing(deployState, spec, cluster); addSearch(deployState, spec, cluster); addDocproc(deployState, spec, cluster); addDocumentApi(spec, cluster); cluster.addDefaultHandlersExceptStatus(); addStatusHandlers(cluster, context.getDeployState().isHosted()); addUserHandlers(deployState, cluster, spec); addHttp(deployState, spec, cluster, context); addAccessLogs(deployState, cluster, spec); addRoutingAliases(cluster, spec, deployState.zone().environment()); addNodes(cluster, spec, context); addClientProviders(deployState, spec, cluster); addServerProviders(deployState, spec, cluster); addAthensCopperArgos(cluster, context); addZooKeeper(cluster, spec); } private void addSecretStore(ApplicationContainerCluster cluster, Element spec) { Element secretStoreElement = XML.getChild(spec, "secret-store"); if (secretStoreElement != null) { SecretStore secretStore = new SecretStore(); for (Element 
group : XML.getChildren(secretStoreElement, "group")) { secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment")); } cluster.setSecretStore(secretStore); } } private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) { if ( ! context.getDeployState().isHosted()) return; app.getDeployment().map(DeploymentSpec::fromXml) .ifPresent(deploymentSpec -> { addIdentityProvider(cluster, context.getDeployState().getProperties().configServerSpecs(), context.getDeployState().getProperties().loadBalancerName(), context.getDeployState().getProperties().ztsUrl(), context.getDeployState().getProperties().athenzDnsSuffix(), context.getDeployState().zone(), deploymentSpec); addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec); }); } private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) { cluster.getContainers().forEach(container -> { setRotations(container, endpoints, cluster.getName()); container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec))); }); } private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) { Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance()); if (instance.isEmpty()) return false; return instance.get().zones().stream() .anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) && declaredZone.active()); } private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) { var rotationsProperty = endpoints.stream() .filter(endpoint -> endpoint.clusterId().equals(containerClusterName)) .flatMap(endpoint -> endpoint.names().stream()) .collect(Collectors.toUnmodifiableSet()); container.setProp("rotations", String.join(",", rotationsProperty)); } private void 
addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
        // Aliases only apply to production zones.
        if (environment != Environment.prod) return;

        Element aliases = XML.getChild(spec, "aliases");
        for (Element serviceAlias : XML.getChildren(aliases, "service-alias"))
            cluster.serviceAliases().add(XML.getValue(serviceAlias));
        for (Element endpointAlias : XML.getChildren(aliases, "endpoint-alias"))
            cluster.endpointAliases().add(XML.getValue(endpointAlias));
    }

    /** Adds components declared directly under the cluster spec and inside any {@code <components>} groups. */
    private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
        for (Element componentsElement : XML.getChildren(spec, "components")) {
            addIncludes(componentsElement);
            addConfiguredComponents(deployState, cluster, componentsElement, "component");
        }
        addConfiguredComponents(deployState, cluster, spec, "component");
    }

    protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
        if ( ! isHostedVespa) {
            cluster.addVipHandler();
            return;
        }
        // Hosted: serve a status file on /status.html; the file location can be overridden via the environment.
        String name = "status.html";
        Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
        cluster.addComponent(new FileStatusHandlerComponent(name + "-status-handler",
                                                           statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
                                                           SystemBindingPattern.fromHttpPath("/" + name)));
    }

    private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        for (Element clientElement : XML.getChildren(spec, "client"))
            cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientElement));
    }

    private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        addConfiguredComponents(deployState, cluster, spec, "server");
    }

    private void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
        List<Element> accessLogElements = getAccessLogElements(spec);
        for (Element accessLog : accessLogElements) {
            AccessLogBuilder.buildIfNotDisabled(deployState, cluster,
accessLog).ifPresent(cluster::addComponent); } if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) cluster.addDefaultSearchAccessLog(); } private List<Element> getAccessLogElements(Element spec) { return XML.getChildren(spec, "accesslog"); } private void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element httpElement = XML.getChild(spec, "http"); if (httpElement != null) { cluster.setHttp(buildHttp(deployState, cluster, httpElement)); } if (isHostedTenantApplication(context)) { addHostedImplicitHttpIfNotPresent(cluster); addHostedImplicitAccessControlIfNotPresent(deployState, cluster); addDefaultConnectorHostedFilterBinding(cluster); addAdditionalHostedConnector(deployState, cluster, context); } } private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) { cluster.getHttp().getAccessControl() .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp())); ; } private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) { JettyHttpServer server = cluster.getHttp().getHttpServer().get(); String serverName = server.getComponentId().getName(); HostedSslConnectorFactory connectorFactory; if (deployState.endpointCertificateSecrets().isPresent()) { boolean authorizeClient = deployState.zone().system().isPublic(); if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) { throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https: } EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get(); boolean enforceHandshakeClientAuth = context.properties().useAccessControlTlsHandshakeClientAuth() && cluster.getHttp().getAccessControl() .map(accessControl -> accessControl.clientAuthentication) .map(clientAuth -> 
clientAuth.equals(AccessControl.ClientAuthentication.need)) .orElse(false); connectorFactory = authorizeClient ? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get()) : HostedSslConnectorFactory.withProvidedCertificate(serverName, endpointCertificateSecrets, enforceHandshakeClientAuth); } else { connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName); } cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory)); server.addConnector(connectorFactory); } private static boolean isHostedTenantApplication(ConfigModelContext context) { var deployState = context.getDeployState(); boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester(); return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication; } private static void addHostedImplicitHttpIfNotPresent(ApplicationContainerCluster cluster) { if(cluster.getHttp() == null) { cluster.setHttp(new Http(new FilterChains(cluster))); } JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null); if (httpServer == null) { httpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"), cluster, cluster.isHostedVespa()); cluster.getHttp().setHttpServer(httpServer); } int defaultPort = Defaults.getDefaults().vespaWebServicePort(); boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort); if (!defaultConnectorPresent) { httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build()); } } private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) { Http http = cluster.getHttp(); if (http.getAccessControl().isPresent()) return; AthenzDomain tenantDomain = 
deployState.getProperties().athenzDomain().orElse(null); if (tenantDomain == null) return; new AccessControl.Builder(tenantDomain.value()) .setHandlers(cluster) .readEnabled(false) .writeEnabled(false) .clientAuthentication(AccessControl.ClientAuthentication.need) .build() .configureHttpFilterChains(http); } private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) { Http http = new HttpBuilder().build(deployState, cluster, httpElement); if (networking == Networking.disable) http.removeAllServers(); return http; } private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element restApiElem : XML.getChildren(spec, "rest-api")) { cluster.addRestApi( new RestApiBuilder().build(deployState, cluster, restApiElem)); } } private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element servletElem : XML.getChildren(spec, "servlet")) cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem)); } private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) { ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec); if (containerDocumentApi == null) return; cluster.setDocumentApi(containerDocumentApi); } private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec); if (containerDocproc == null) return; cluster.setDocproc(containerDocproc); ContainerDocproc.Options docprocOptions = containerDocproc.options; cluster.setMbusParams(new ApplicationContainerCluster.MbusParams( docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory)); } private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element searchElement = XML.getChild(spec, "search"); if (searchElement 
== null) return; addIncludes(searchElement); cluster.setSearch(buildSearch(deployState, cluster, searchElement)); addSearchHandler(cluster, searchElement, deployState); addGUIHandler(cluster); validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement); } private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element modelEvaluationElement = XML.getChild(spec, "model-evaluation"); if (modelEvaluationElement == null) return; RankProfileList profiles = context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty; cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles)); } private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element processingElement = XML.getChild(spec, "processing"); if (processingElement == null) return; addIncludes(processingElement); cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement), serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new)); validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement); } private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) { SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec); ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options()); applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch); containerSearch.setQueryProfiles(deployState.getQueryProfiles()); containerSearch.setSemanticRules(deployState.getSemanticRules()); return containerSearch; } private void 
applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) { PageTemplates.validate(applicationPackage); containerSearch.setPageTemplates(PageTemplates.create(applicationPackage)); } private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { for (Element component: XML.getChildren(spec, "handler")) { cluster.addComponent( new DomHandlerBuilder(cluster).build(deployState, cluster, component)); } } private void checkVersion(Element spec) { String version = spec.getAttribute("version"); if ( ! Version.fromString(version).equals(new Version(1))) { throw new RuntimeException("Expected container version to be 1.0, but got " + version); } } private void checkTagName(Element spec, DeployLogger logger) { if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) { logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead."); } } private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { if (standaloneBuilder) addStandaloneNode(cluster); else addNodesFromXml(cluster, spec, context); } private void addStandaloneNode(ApplicationContainerCluster cluster) { ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa()); cluster.addContainers(Collections.singleton(container)); } static boolean incompatibleGCOptions(String jvmargs) { Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC"); Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS"); return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find()); } private static String buildJvmGCOptions(DeployState deployState, String jvmGCOPtions) { String options = (jvmGCOPtions != null) ? jvmGCOPtions : deployState.getProperties().jvmGCOptions(); return (options == null ||options.isEmpty()) ? (deployState.isHosted() ? 
ContainerCluster.CMS : ContainerCluster.G1GC) : options; } private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) { String jvmOptions; if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options."); } } else { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); if (incompatibleGCOptions(jvmOptions)) { deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'"); cluster.setJvmGCOptions(ContainerCluster.G1GC); } } return jvmOptions; } private static String extractAttribute(Element element, String attrName) { return element.hasAttribute(attrName) ? 
element.getAttribute(attrName) : null; } void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger())); if (!cluster.getJvmGCOptions().isPresent()) { String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS); cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); } applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); } void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, Element jvmElement, ConfigModelContext context) { applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS)); applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS); cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); } /** * Add nodes to cluster according to the given containerElement. * * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed * simultaneously for all active config models. 
*/
    private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
        Element nodesElement = XML.getChild(containerElement, "nodes");
        if (nodesElement == null) {
            cluster.addContainers(allocateWithoutNodesTag(cluster, context));
            return;
        }

        List<ApplicationContainer> nodes = createNodes(cluster, nodesElement, context);

        // JVM settings come either from the new <jvm> tag or from legacy attributes on <nodes>.
        Element jvmElement = XML.getChild(nodesElement, "jvm");
        if (jvmElement == null)
            extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
        else
            extractJvmTag(nodes, cluster, jvmElement, context);

        applyRoutingAliasProperties(nodes, cluster);
        applyDefaultPreload(nodes, nodesElement);

        String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
        if (environmentVars != null && ! environmentVars.isEmpty())
            cluster.setEnvironmentVars(environmentVars);

        if (useCpuSocketAffinity(nodesElement))
            AbstractService.distributeCpuSocketAffinity(nodes);

        cluster.addContainers(nodes);
    }

    /**
     * Renders the children of the environment-variables element as space-separated NAME=value pairs.
     * Returns the empty string (never null) when the element is absent or empty.
     */
    private static String getEnvironmentVariables(Element environmentVariables) {
        StringBuilder variables = new StringBuilder();
        if (environmentVariables != null) {
            for (Element variable : XML.getChildren(environmentVariables)) {
                variables.append(variable.getNodeName()).append('=').append(variable.getTextContent()).append(' ');
            }
        }
        return variables.toString();
    }

    /** Creates nodes for this cluster according to which allocation attribute the nodes tag carries. */
    private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
        if (nodesElement.hasAttribute("type"))
            return createNodesFromNodeType(cluster, nodesElement, context);
        else if (nodesElement.hasAttribute("of"))
            return createNodesFromContentServiceReference(cluster, nodesElement, context);
        else if (nodesElement.hasAttribute("count"))
            return createNodesFromNodeCount(cluster, nodesElement, context);
        else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
            return createNodesFromNodeCount(cluster, nodesElement, context);
        else
            return
createNodesFromNodeList(context.getDeployState(), cluster, nodesElement); } private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) { if (!cluster.serviceAliases().isEmpty()) { result.forEach(container -> { container.setProp("servicealiases", cluster.serviceAliases().stream().collect(Collectors.joining(","))); }); } if (!cluster.endpointAliases().isEmpty()) { result.forEach(container -> { container.setProp("endpointaliases", cluster.endpointAliases().stream().collect(Collectors.joining(","))); }); } } private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) { if (memoryPercentage == null || memoryPercentage.isEmpty()) return; memoryPercentage = memoryPercentage.trim(); if ( ! memoryPercentage.endsWith("%")) throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster + " must be an integer percentage ending by the '%' sign"); memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim(); try { cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage)); } catch (NumberFormatException e) { throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster + " must be an integer percentage ending by the '%' sign"); } } /** Allocate a container cluster without a nodes tag */ private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) { DeployState deployState = context.getDeployState(); HostSystem hostSystem = cluster.hostSystem(); if (deployState.isHosted()) { ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName())) .vespaVersion(deployState.getWantedNodeVespaVersion()) .dockerImageRepository(deployState.getWantedDockerImageRepo()) .build(); int nodeCount = deployState.zone().environment().isProduction() ? 
2 : 1; Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()), false, !deployState.getProperties().isBootstrap()); var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log); return createNodesFromHosts(log, hosts, cluster); } return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context); } private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) { ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa()); node.setHostResource(host); node.initService(context.getDeployLogger()); return List.of(node); } private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context); Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(), ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()), log); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodeType type = NodeType.valueOf(nodesElement.getAttribute("type")); ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName())) .vespaVersion(context.getDeployState().getWantedNodeVespaVersion()) .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo()) .build(); Map<HostResource, ClusterMembership> hosts = cluster.getRoot().hostSystem().allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(type), log); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> 
createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodesSpecification nodeSpecification; try { nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context); } catch (IllegalArgumentException e) { throw new IllegalArgumentException(cluster + " contains an invalid reference", e); } String referenceId = nodesElement.getAttribute("of"); cluster.setHostClusterId(referenceId); Map<HostResource, ClusterMembership> hosts = StorageGroup.provisionHosts(nodeSpecification, referenceId, cluster.getRoot().hostSystem(), context.getDeployLogger()); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) { List<ApplicationContainer> nodes = new ArrayList<>(); for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) { String id = "container." + entry.getValue().index(); ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa()); container.setHostResource(entry.getKey()); container.initService(deployLogger); nodes.add(container); } return nodes; } private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) { List<ApplicationContainer> nodes = new ArrayList<>(); int nodeIndex = 0; for (Element nodeElem: XML.getChildren(nodesElement, "node")) { nodes.add(new ContainerServiceBuilder("container." 
+ nodeIndex, nodeIndex).build(deployState, cluster, nodeElem)); nodeIndex++; } return nodes; } private static boolean useCpuSocketAffinity(Element nodesElement) { if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)) return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)); else return false; } private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) { for (Container container: containers) { if (container.getAssignedJvmOptions().isEmpty()) container.prependJvmOptions(jvmArgs); } } private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) { if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return; for (Container container: containers) container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)); } private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement, DeployState deployState) { cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(), "com.yahoo.search.searchchain.ExecutionFactory")); cluster.addComponent( new SearchHandler( cluster, serverBindings(searchElement, SearchHandler.DEFAULT_BINDING), ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null))); } private void addGUIHandler(ApplicationContainerCluster cluster) { Handler<?> guiHandler = new GUIHandler(); guiHandler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH)); cluster.addComponent(guiHandler); } private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... 
defaultBindings) { List<Element> bindings = XML.getChildren(searchElement, "binding"); if (bindings.isEmpty()) return List.of(defaultBindings); return toBindingList(bindings); } private List<BindingPattern> toBindingList(List<Element> bindingElements) { List<BindingPattern> result = new ArrayList<>(); for (Element element: bindingElements) { String text = element.getTextContent().trim(); if (!text.isEmpty()) result.add(UserBindingPattern.fromPattern(text)); } return result; } private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) { Element documentApiElement = XML.getChild(spec, "document-api"); if (documentApiElement == null) return null; ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement); return new ContainerDocumentApi(cluster, documentApiOptions); } private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { Element docprocElement = XML.getChild(spec, "document-processing"); if (docprocElement == null) return null; addIncludes(docprocElement); DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement); ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement); return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder); } private void addIncludes(Element parentElement) { List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE); if (includes == null || includes.isEmpty()) { return; } if (app == null) { throw new IllegalArgumentException("Element <include> given in XML config, but no application package given."); } for (Element include : includes) { addInclude(parentElement, include); } } private void addInclude(Element parentElement, Element include) { String dirName = include.getAttribute(IncludeDirs.DIR); app.validateIncludeDir(dirName); List<Element> includedFiles = Xml.allElemsFromPath(app, 
dirName); for (Element includedFile : includedFiles) { List<Element> includedSubElements = XML.getChildren(includedFile); for (Element includedSubElement : includedSubElements) { Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true); parentElement.appendChild(copiedNode); } } } private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster, Element spec, String componentName) { for (Element node : XML.getChildren(spec, componentName)) { cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)); } } private static void validateAndAddConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster, Element spec, String componentName, Consumer<Element> elementValidator) { for (Element node : XML.getChildren(spec, componentName)) { elementValidator.accept(node); cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)); } } private void addIdentityProvider(ApplicationContainerCluster cluster, List<ConfigServerSpec> configServerSpecs, HostName loadBalancerName, URI ztsUrl, String athenzDnsSuffix, Zone zone, DeploymentSpec spec) { spec.athenzDomain() .ifPresent(domain -> { AthenzService service = spec.instance(app.getApplicationId().instance()) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> spec.athenzService()) .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'")); String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." 
+ athenzDnsSuffix; IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone); cluster.addComponent(identityProvider); cluster.getContainers().forEach(container -> { container.setProp("identity.domain", domain.value()); container.setProp("identity.service", service.value()); }); }); } private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) { return Optional.ofNullable(loadbalancerName) .orElseGet( () -> HostName.from(configServerSpecs.stream() .findFirst() .map(ConfigServerSpec::getHostName) .orElse("unknown") )); } /** Disallow renderers named "XmlRenderer" or "JsonRenderer" */ private static void validateRendererElement(Element element) { String idAttr = element.getAttribute("id"); if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) { throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr)); } } public static boolean isContainerTag(Element element) { return CONTAINER_TAG.equals(element.getTagName()) || DEPRECATED_CONTAINER_TAG.equals(element.getTagName()); } }
```suggestion bandwidthGbps.equals(that.bandwidthGbps); ```
public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClusterCapacity that = (ClusterCapacity) o; return count == that.count && Double.compare(that.vcpu, vcpu) == 0 && Double.compare(that.memoryGb, memoryGb) == 0 && Double.compare(that.diskGb, diskGb) == 0 && ((bandwidthGbps.isEmpty() && that.bandwidthGbps.isEmpty()) || ((bandwidthGbps.isPresent() && that.bandwidthGbps.isPresent() && Double.compare(that.bandwidthGbps.get(), bandwidthGbps.get()) == 0))); }
Double.compare(that.bandwidthGbps.get(), bandwidthGbps.get()) == 0)));
public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClusterCapacity that = (ClusterCapacity) o; return count == that.count && Double.compare(that.vcpu, vcpu) == 0 && Double.compare(that.memoryGb, memoryGb) == 0 && Double.compare(that.diskGb, diskGb) == 0 && bandwidthGbps.equals(that.bandwidthGbps); }
class ClusterCapacity { private final int count; private final double vcpu; private final double memoryGb; private final double diskGb; private final Optional<Double> bandwidthGbps; @JsonCreator public ClusterCapacity(@JsonProperty("count") int count, @JsonProperty("vcpu") double vcpu, @JsonProperty("memoryGb") double memoryGb, @JsonProperty("diskGb") double diskGb, @JsonProperty("bandwidthGbps") Double bandwidthGbps) { this.count = (int) requireNonNegative("count", count); this.vcpu = requireNonNegative("vcpu", vcpu); this.memoryGb = requireNonNegative("memoryGb", memoryGb); this.diskGb = requireNonNegative("diskGb", diskGb); this.bandwidthGbps = Optional.ofNullable(bandwidthGbps); } /** Returns a new ClusterCapacity equal to {@code this}, but with the given count. */ public ClusterCapacity withCount(int count) { return new ClusterCapacity(count, vcpu, memoryGb, diskGb, bandwidthGbps.orElse(null)); } @JsonGetter("count") public int count() { return count; } @JsonGetter("vcpu") public double vcpu() { return vcpu; } @JsonGetter("memoryGb") public double memoryGb() { return memoryGb; } @JsonGetter("diskGb") public double diskGb() { return diskGb; } @JsonGetter("bandwidthGbps") public Double bandwidthGbpsOrNull() { return bandwidthGbps.orElse(null); } @JsonIgnore public double bandwidthGbps() { return bandwidthGbps.orElse(1.0); } @Override public String toString() { return "ClusterCapacity{" + "count=" + count + ", vcpu=" + vcpu + ", memoryGb=" + memoryGb + ", diskGb=" + diskGb + ", bandwidthGbps=" + bandwidthGbps + '}'; } @Override @Override public int hashCode() { return Objects.hash(count, vcpu, memoryGb, diskGb, bandwidthGbps); } private static double requireNonNegative(String name, double value) { if (value < 0) throw new IllegalArgumentException("'" + name + "' must be positive, was " + value); return value; } }
class ClusterCapacity { private final int count; private final double vcpu; private final double memoryGb; private final double diskGb; private final OptionalDouble bandwidthGbps; @JsonCreator public ClusterCapacity(@JsonProperty("count") int count, @JsonProperty("vcpu") double vcpu, @JsonProperty("memoryGb") double memoryGb, @JsonProperty("diskGb") double diskGb, @JsonProperty("bandwidthGbps") Double bandwidthGbps) { this.count = (int) requireNonNegative("count", count); this.vcpu = requireNonNegative("vcpu", vcpu); this.memoryGb = requireNonNegative("memoryGb", memoryGb); this.diskGb = requireNonNegative("diskGb", diskGb); this.bandwidthGbps = bandwidthGbps == null ? OptionalDouble.empty() : OptionalDouble.of(bandwidthGbps); } /** Returns a new ClusterCapacity equal to {@code this}, but with the given count. */ public ClusterCapacity withCount(int count) { return new ClusterCapacity(count, vcpu, memoryGb, diskGb, bandwidthGbpsOrNull()); } @JsonGetter("count") public int count() { return count; } @JsonGetter("vcpu") public double vcpu() { return vcpu; } @JsonGetter("memoryGb") public double memoryGb() { return memoryGb; } @JsonGetter("diskGb") public double diskGb() { return diskGb; } @JsonGetter("bandwidthGbps") public Double bandwidthGbpsOrNull() { return bandwidthGbps.isPresent() ? bandwidthGbps.getAsDouble() : null; } @JsonIgnore public double bandwidthGbps() { return bandwidthGbps.orElse(1.0); } @Override public String toString() { return "ClusterCapacity{" + "count=" + count + ", vcpu=" + vcpu + ", memoryGb=" + memoryGb + ", diskGb=" + diskGb + ", bandwidthGbps=" + bandwidthGbps + '}'; } @Override @Override public int hashCode() { return Objects.hash(count, vcpu, memoryGb, diskGb, bandwidthGbps); } private static double requireNonNegative(String name, double value) { if (value < 0) throw new IllegalArgumentException("'" + name + "' must be positive, was " + value); return value; } }
Yes, in case we downgraded at one point. It's also not more confusing to explicitly pick the lowest version, than to assume we always upgrade, and pick the earliest installed, imho. I'll add a note.
private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); }
if (run.status() == RunStatus.success)
private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificateManager endpointCertificateManager; private final StringFlag dockerImageRepoFlag; private final BooleanFlag provisionApplicationRoles; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, SecretStore secretStore, FlagSource flagSource, BillingController billingController) { this.controller = controller; this.curator = curator; this.accessControl = accessControl; this.configServer = controller.serviceRegistry().configServer(); this.clock = clock; this.artifactRepository = controller.serviceRegistry().artifactRepository(); this.applicationStore = controller.serviceRegistry().applicationStore(); this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource); this.billingController = billingController; deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore, controller.serviceRegistry().endpointCertificateProvider(), clock, flagSource); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : 
curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)) .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId)); } /** Enables reindexing for the given application in the given zone. 
*/ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. */ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all content clusters in all current deployments of the given application. 
*/ public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), job.application(), EnumSet.of(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) { if (id.instance().isTester()) throw new IllegalArgumentException("'" + id + "' is a tester application!"); InstanceId.validate(id.instance().value()); if (getInstance(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists"); if (getInstance(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists"); log.info("Created " + id); return application.withNewInstance(id.instance()); } public ActivateResult deploy(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options); } /** Deploys an application package for an existing application instance. 
*/ public ActivateResult deploy2(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Optional<ApplicationRoles> applicationRoles = Optional.empty(); Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || revision.compareTo(deployment.applicationVersion()) < 0 && ! 
(revision.isUnknown() && controller.system().isCd()))) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion())); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name())); endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone); if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) { try { applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id()); } catch (Exception e) { log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e); throw new RuntimeException("Unable to provision iam roles for application"); } } } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints, endpointCertificateMetadata, applicationRoles); var quotaUsage = deploymentQuotaUsage(zone, job.application()); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } private 
ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) { return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone) : applicationStore.get(application.tenant(), application.application(), revision)); } public ActivateResult deploy(ApplicationId instanceId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, Optional<ApplicationVersion> applicationVersionFromDeployer, DeployOptions options) { if (instanceId.instance().isTester()) throw new IllegalArgumentException("'" + instanceId + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId); if (getInstance(instanceId).isEmpty()) createInstance(instanceId); try (Lock deploymentLock = lockForDeployment(instanceId, zone)) { Version platformVersion; ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); InstanceName instance = instanceId.instance(); boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed(); boolean preferOldestVersion = options.deployCurrentVersion; if (manuallyDeployed) { applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown); applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package must be given when deploying to " + zone)); platformVersion = options.vespaVersion.map(Version::new) .orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(this::lastCompatibleVersion) .orElseGet(controller::readSystemVersion)); } else { JobType jobType = JobType.from(controller.system(), zone) .orElseThrow(() -> new IllegalArgumentException("No job is known for " 
+ zone + ".")); var run = controller.jobController().last(instanceId, jobType); if (run.map(Run::hasEnded).orElse(true)) return unexpectedDeployment(instanceId, zone); Versions versions = run.get().versions(); platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); applicationPackage = getApplicationPackage(instanceId, applicationVersion); applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType); validateRun(application.get().require(instance), zone, platformVersion, applicationVersion); } endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata( application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance)); endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone); } ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints, endpointCertificateMetadata, Optional.empty()); var quotaUsage = deploymentQuotaUsage(zone, instanceId); lockApplicationOrThrow(applicationId, application -> store(application.with(instanceId.instance(), instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) { if (applicationPackage.trustedCertificates().isEmpty()) return applicationPackage; Run run = controller.jobController().last(id, type) .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found")); if (run.testerCertificate().isEmpty()) return applicationPackage; return applicationPackage.withTrustedCertificate(run.testerCertificate().get()); } /** Fetches the 
requested application package from the artifact store(s). */ public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); } /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */ public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } for (InstanceName instance : declaredInstances) if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod)) application = controller.routing().assignRotations(application, instance); store(application); return application; } /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( 
artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty()); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. */ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty()); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, Optional<ApplicationRoles> applicationRoles) { try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(new DeploymentId(application, zone), clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, 
                                                                                   endpointCertificateMetadata, dockerImageRepo, domain,
                                                                                   applicationRoles, deploymentQuota));
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Always refresh routing policies, also when preparation fails.
            controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
        }
    }

    /** Creates a warning-only prepare response saying that no deployment was expected for this application in this zone. */
    private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone + " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = List.of(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of());
        return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
    }

    /**
     * Deactivates production deployments of the given instance which are no longer declared in the deployment spec.
     * Removal requires an active deploymentRemoval validation override.
     */
    private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
        DeploymentSpec deploymentSpec = application.get().deploymentSpec();
        // Production zones the instance is deployed to, but which the spec no longer covers.
        List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                      .map(Deployment::zone)
                                                      .filter(zone ->      deploymentSpec.instance(instance).isEmpty()
                                                                      || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region()))
                                                      .collect(toList());

        if (deploymentsToRemove.isEmpty()) return application;

        // Removing production deployments must be explicitly allowed by a validation override.
        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(zone -> zone.region().value())
                                                                  .collect(joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));

        // Also drop the instance itself when it is undeclared and all its deployments are being removed.
        boolean removeInstance =    ! deploymentSpec.instanceNames().contains(instance)
                                 &&   application.get().require(instance).deployments().size() == deploymentsToRemove.size();
        for (ZoneId zone : deploymentsToRemove)
            application = deactivate(application, instance, zone);
        if (removeInstance)
            application = application.without(instance);
        return application;
    }

    /**
     * Deletes the given application. All known instances of the application will be deleted.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
        lockApplicationOrThrow(id, application -> {
            // Instances with active deployments block deletion; collect them for the error message.
            var deployments = application.get().instances().values().stream()
                                         .filter(instance -> ! instance.deployments().isEmpty())
                                         .collect(toMap(instance -> instance.name(),
                                                        instance -> instance.deployments().keySet().stream()
                                                                            .map(ZoneId::toString)
                                                                            .collect(joining(", "))));
            if ( ! deployments.isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);

            for (Instance instance : application.get().instances().values()) {
                controller.routing().removeEndpointsInDns(application.get(), instance.name());
                application = application.without(instance.name());
            }
            applicationStore.removeAll(id.tenant(), id.application());
            applicationStore.removeAllTesters(id.tenant(), id.application());
            applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());

            accessControl.deleteApplication(id, credentials);
            curator.removeApplication(id);
            controller.jobController().collectGarbage();
            log.info("Deleted " + id);
        });
    }

    /**
     * Deletes the given application instance.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the instance does not exist
     */
    public void deleteInstance(ApplicationId instanceId) {
        if (getInstance(instanceId).isEmpty())
            throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");

        lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
            if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                                   application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                              .sorted().collect(joining(", ")));
            // An instance declared in deployment.xml must be removed from the spec before it can be deleted.
            if (   ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
                &&   application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
                throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");

            controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
            curator.writeApplication(application.without(instanceId.instance()).get());
            controller.jobController().collectGarbage();
            log.info("Deleted " + instanceId);
        });
    }

    /**
     * Replaces any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        curator.writeApplication(application.get());
    }

    /**
     * Acquires a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
*/ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. 
*/ public boolean isSuspended(DeploymentId deploymentId) { try { return configServer.isSuspended(deploymentId); } catch (ConfigServerException e) { if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND) return false; throw e; } } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. 
     */
    Lock lock(TenantAndApplicationId application) {
        return curator.lock(application);
    }

    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        return curator.lockForDeployment(application, zone);
    }

    /** Verify that we don't downgrade an existing production deployment. */
    private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
        Deployment deployment = instance.deployments().get(zone);
        // Only an existing deployment in a production zone is protected against downgrades.
        // A pinned change is allowed to deploy an older platform on purpose.
        if (   zone.environment().isProduction() && deployment != null
            && (   platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned()
                || applicationVersion.compareTo(deployment.applicationVersion()) < 0))
            throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                             " are older than the currently deployed (platform: %s, application: %s).",
                                                             instance.id(), zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
    }

    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3.
 If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
     *
     * @param tenantName tenant where application should be deployed
     * @param applicationPackage application package
     * @param deployer principal initiating the deployment, possibly empty
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
        Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                                  .map(domain -> new AthenzDomain(domain.value()));
        // Nothing to verify when no Athenz domain is declared in deployment.xml.
        if (identityDomain.isEmpty()) {
            return;
        }
        if ( ! (accessControl instanceof AthenzFacade)) {
            throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
        }
        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());

        Optional<AthenzUser> athenzUser = getUser(deployer);
        if (athenzUser.isPresent()) {
            // A user is deploying: the user must be allowed to launch the configured service,
            // or be an admin of the tenant's Athenz domain.
            var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
            var serviceToLaunch = instanceName
                    .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> applicationPackage.deploymentSpec().athenzService())
                    .map(service -> new AthenzService(identityDomain.get(), service.value()));

            if (serviceToLaunch.isPresent()) {
                if (   ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get())
                    && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())) {
                    throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                       "service " + serviceToLaunch.get().getFullName() + ". " +
                                                       "Please reach out to the domain admin.");
                }
            } else {
                throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
            }
        } else {
            // The principal is not an Athenz user: require that the tenant's domain matches the one in deployment.xml.
            Tenant tenant = controller.tenants().require(tenantName);
            AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
            if ( ! Objects.equals(tenantDomain, identityDomain.get()))
                throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                                   "must match tenant domain: [" + tenantDomain.getName() + "]");
        }
    }

    /**
     * Gets the AthenzUser from this principal or Optional.empty if this does not represent a user.
     */
    private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
        return deployer.filter(AthenzPrincipal.class::isInstance)
                       .map(AthenzPrincipal.class::cast)
                       .map(AthenzPrincipal::getIdentity)
                       .filter(AthenzUser.class::isInstance)
                       .map(AthenzUser.class::cast);
    }

    /**
     * Verifies that the configured athenz service (if any) can be launched,
     * by the config server identity of every reachable zone.
     */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(domain -> {
            controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
                AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
                // The default service declared for the whole deployment spec ...
                deploymentSpec.athenzService().ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
                // ... and any per-instance service overrides.
                deploymentSpec.instances().forEach(spec -> {
                    spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                    });
                });
            });
        });
    }

    private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
        if ( !
((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
            throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
    }

    /** Returns the latest known version within the given major. */
    public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
        return controller.readVersionStatus().versions().stream()
                         .filter(version -> version.versionNumber().getMajor() == targetMajorVersion)
                         .map(VespaVersion::versionNumber)
                         .max(naturalOrder());
    }

    /** Extracts the deployment warnings metric from a deployment result. */
    private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
        List<Log> entries = result.prepareResponse().log;
        if (entries == null) return Map.of();

        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        for (Log entry : entries) {
            boolean isWarning =    "warn".equalsIgnoreCase(entry.level)
                                || "warning".equalsIgnoreCase(entry.level);
            if (isWarning)
                warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
        }
        return Map.copyOf(warnings);
    }

}
/**
 * Operations on the applications known to this controller, persisted through the curator database.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For persistence */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificateManager endpointCertificateManager;
    private final StringFlag dockerImageRepoFlag;
    private final BooleanFlag provisionApplicationRoles;
    private final BillingController billingController;

    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          SecretStore secretStore, FlagSource flagSource, BillingController billingController) {
        this.controller = controller;
        this.curator = curator;
        this.accessControl = accessControl;
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = clock;
        this.artifactRepository = controller.serviceRegistry().artifactRepository();
        this.applicationStore = controller.serviceRegistry().applicationStore();
        this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource);
        this.billingController = billingController;

        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore,
                                                                    controller.serviceRegistry().endpointCertificateProvider(), clock, flagSource);

        // Shortly after startup, rewrite all stored applications once, creating any instances which are
        // declared in deployment.xml but not yet present.
        // NOTE(review): this touches every application in the curator store — presumably a one-time
        // migration/backfill aid; confirm before removing.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if ( ! application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++;
            }
            log.log(Level.INFO, String.format("Wrote %d applications in %s",
                                              count, Duration.between(start, clock.instant())));
        });
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }

    /** Returns the instance with the given id, or empty if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }

    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no documents types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes);
    }

    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId))
                           .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId));
    }

    /** Enables reindexing for the given application in the given zone.
     */
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }

    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() { return curator.readApplications(false); }

    /**
     * Returns a snapshot of all readable applications. Unlike {@link #asList()}, this omits
     * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
     * snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() { return curator.readApplications(true); }

    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); }

    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); }

    /** Returns the artifact repository used by this. */
    public ArtifactRepository artifacts() { return artifactRepository; }

    /** Returns the application store used by this. */
    public ApplicationStore applicationStore() { return applicationStore; }

    /** Returns all content clusters in all current deployments of the given application.
     */
    public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) {
        Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
        for (DeploymentId id : ids)
            clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
        return Collections.unmodifiableMap(clusters);
    }

    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  job.application(),
                                                  EnumSet.of(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty())
                           .min(naturalOrder());
    }

    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         // No versioned production nodes at all: fall back to the current system version.
                         .orElse(controller.readSystemVersion());
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // An id differing only by dash vs underscore from an existing one is rejected as a duplicate.
            if (getApplication(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            accessControl.createApplication(id, credentials);

            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }

    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
     */
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }

    /** Adds the given instance to the given application, without storing it; fails if the instance already exists. */
    public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) {
        if (id.instance().isTester())
            throw new IllegalArgumentException("'" + id + "' is a tester application!");
        InstanceId.validate(id.instance().value());

        if (getInstance(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists");
        // An id differing only by dash vs underscore from an existing one is rejected as a duplicate.
        if (getInstance(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists");
        log.info("Created " + id);
        return application.withNewInstance(id.instance());
    }

    public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                                 Optional<ApplicationPackage> applicationPackageFromDeployer,
                                 DeployOptions options) {
        return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options);
    }

    /** Deploys an application package for an existing application instance.
*/ public ActivateResult deploy2(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Optional<ApplicationRoles> applicationRoles = Optional.empty(); Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || revision.compareTo(deployment.applicationVersion()) < 0 && ! 
(revision.isUnknown() && controller.system().isCd()))) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion())); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name())); endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone); if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) { try { applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id()); } catch (Exception e) { log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e); throw new RuntimeException("Unable to provision iam roles for application"); } } } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints, endpointCertificateMetadata, applicationRoles); var quotaUsage = deploymentQuotaUsage(zone, job.application()); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } private 
ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) { return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone) : applicationStore.get(application.tenant(), application.application(), revision)); } public ActivateResult deploy(ApplicationId instanceId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, Optional<ApplicationVersion> applicationVersionFromDeployer, DeployOptions options) { if (instanceId.instance().isTester()) throw new IllegalArgumentException("'" + instanceId + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId); if (getInstance(instanceId).isEmpty()) createInstance(instanceId); try (Lock deploymentLock = lockForDeployment(instanceId, zone)) { Version platformVersion; ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); InstanceName instance = instanceId.instance(); boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed(); boolean preferOldestVersion = options.deployCurrentVersion; if (manuallyDeployed) { applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown); applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package must be given when deploying to " + zone)); platformVersion = options.vespaVersion.map(Version::new) .orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(this::lastCompatibleVersion) .orElseGet(controller::readSystemVersion)); } else { JobType jobType = JobType.from(controller.system(), zone) .orElseThrow(() -> new IllegalArgumentException("No job is known for " 
+ zone + ".")); var run = controller.jobController().last(instanceId, jobType); if (run.map(Run::hasEnded).orElse(true)) return unexpectedDeployment(instanceId, zone); Versions versions = run.get().versions(); platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); applicationPackage = getApplicationPackage(instanceId, applicationVersion); applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType); validateRun(application.get().require(instance), zone, platformVersion, applicationVersion); } endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata( application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance)); endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone); } ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints, endpointCertificateMetadata, Optional.empty()); var quotaUsage = deploymentQuotaUsage(zone, instanceId); lockApplicationOrThrow(applicationId, application -> store(application.with(instanceId.instance(), instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) { if (applicationPackage.trustedCertificates().isEmpty()) return applicationPackage; Run run = controller.jobController().last(id, type) .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found")); if (run.testerCertificate().isEmpty()) return applicationPackage; return applicationPackage.withTrustedCertificate(run.testerCertificate().get()); } /** Fetches the 
requested application package from the artifact store(s). */
    public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
        return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
    }

    /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
    public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
        applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());

        application = application.with(applicationPackage.deploymentSpec());
        application = application.with(applicationPackage.validationOverrides());

        var existingInstances = application.get().instances().keySet();
        var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
        // Create instances which are declared in the spec but do not yet exist.
        for (var name : declaredInstances)
            if ( ! existingInstances.contains(name))
                application = withNewInstance(application, application.get().id().instance(name));

        // Drop deployments (and possibly instances) no longer covered by the spec.
        for (InstanceName name : existingInstances) {
            application = withoutDeletedDeployments(application, name);
        }

        // Assign rotations to declared instances which deploy to production.
        for (InstanceName instance : declaredInstances)
            if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
                application = controller.routing().assignRotations(application, instance);

        store(application);
        return application;
    }

    /** Deploy a system application to given zone */
    public void deploy(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            deploySystemApplicationPackage(application, zone, version);
        } else {
            // No package: upgrade this application's node type directly through the node repository.
            configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
        }
    }

    /** Deploy a system application to given zone */
    public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            ApplicationPackage applicationPackage = new ApplicationPackage(
                    artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
            );
            return deploy(application.id(), applicationPackage, zone, version, Set.of(),
                          /* No application cert */ Optional.empty(), Optional.empty());
        } else {
            throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
        }
    }

    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(),
                      /* No application cert for tester*/ Optional.empty(), Optional.empty());
    }

    /** Deploys the given package to the given zone, with the given endpoints, certificate metadata and roles. */
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone,
                                  Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  Optional<ApplicationRoles> applicationRoles) {
        try {
            // A blank or unset flag value means no image repo override.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);

            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant-> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant)tenant).domain());

            // Manual deployments also store the package's meta data zip.
            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                     clock.instant(),
                                                                     applicationPackage.metaDataZip());

            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                                                                        asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());
            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints,
endpointCertificateMetadata, dockerImageRepo, domain, applicationRoles, deploymentQuota)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); } } private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = List.of(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of()); return new ActivateResult(new RevisionId("0"), prepareResponse, 0); } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); accessControl.deleteApplication(id, credentials); curator.removeApplication(id); controller.jobController().collectGarbage(); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. 
*/ public boolean isSuspended(DeploymentId deploymentId) { try { return configServer.isSuspended(deploymentId); } catch (ConfigServerException e) { if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND) return false; throw e; } } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. 
*/ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** Verify that we don't downgrade an existing production deployment. */ private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) { Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || applicationVersion.compareTo(deployment.applicationVersion()) < 0)) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", instance.id(), zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion())); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. 
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. */ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! 
((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { return controller.readVersionStatus().versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
I decided to use Double.compare because it was used to test for equality by the auto-generated code for all the double fields. According to Effective Java, Double.compare should be used to compare doubles to 1. take care of +/- 0.0 and NaN and 2. to avoid auto-boxing. With Optional<Double> (2) is not relevant. I switched to using OptionalDouble which uses Double.compare in equals.
public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClusterCapacity that = (ClusterCapacity) o; return count == that.count && Double.compare(that.vcpu, vcpu) == 0 && Double.compare(that.memoryGb, memoryGb) == 0 && Double.compare(that.diskGb, diskGb) == 0 && ((bandwidthGbps.isEmpty() && that.bandwidthGbps.isEmpty()) || ((bandwidthGbps.isPresent() && that.bandwidthGbps.isPresent() && Double.compare(that.bandwidthGbps.get(), bandwidthGbps.get()) == 0))); }
Double.compare(that.bandwidthGbps.get(), bandwidthGbps.get()) == 0)));
public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClusterCapacity that = (ClusterCapacity) o; return count == that.count && Double.compare(that.vcpu, vcpu) == 0 && Double.compare(that.memoryGb, memoryGb) == 0 && Double.compare(that.diskGb, diskGb) == 0 && bandwidthGbps.equals(that.bandwidthGbps); }
class ClusterCapacity { private final int count; private final double vcpu; private final double memoryGb; private final double diskGb; private final Optional<Double> bandwidthGbps; @JsonCreator public ClusterCapacity(@JsonProperty("count") int count, @JsonProperty("vcpu") double vcpu, @JsonProperty("memoryGb") double memoryGb, @JsonProperty("diskGb") double diskGb, @JsonProperty("bandwidthGbps") Double bandwidthGbps) { this.count = (int) requireNonNegative("count", count); this.vcpu = requireNonNegative("vcpu", vcpu); this.memoryGb = requireNonNegative("memoryGb", memoryGb); this.diskGb = requireNonNegative("diskGb", diskGb); this.bandwidthGbps = Optional.ofNullable(bandwidthGbps); } /** Returns a new ClusterCapacity equal to {@code this}, but with the given count. */ public ClusterCapacity withCount(int count) { return new ClusterCapacity(count, vcpu, memoryGb, diskGb, bandwidthGbps.orElse(null)); } @JsonGetter("count") public int count() { return count; } @JsonGetter("vcpu") public double vcpu() { return vcpu; } @JsonGetter("memoryGb") public double memoryGb() { return memoryGb; } @JsonGetter("diskGb") public double diskGb() { return diskGb; } @JsonGetter("bandwidthGbps") public Double bandwidthGbpsOrNull() { return bandwidthGbps.orElse(null); } @JsonIgnore public double bandwidthGbps() { return bandwidthGbps.orElse(1.0); } @Override public String toString() { return "ClusterCapacity{" + "count=" + count + ", vcpu=" + vcpu + ", memoryGb=" + memoryGb + ", diskGb=" + diskGb + ", bandwidthGbps=" + bandwidthGbps + '}'; } @Override @Override public int hashCode() { return Objects.hash(count, vcpu, memoryGb, diskGb, bandwidthGbps); } private static double requireNonNegative(String name, double value) { if (value < 0) throw new IllegalArgumentException("'" + name + "' must be positive, was " + value); return value; } }
class ClusterCapacity { private final int count; private final double vcpu; private final double memoryGb; private final double diskGb; private final OptionalDouble bandwidthGbps; @JsonCreator public ClusterCapacity(@JsonProperty("count") int count, @JsonProperty("vcpu") double vcpu, @JsonProperty("memoryGb") double memoryGb, @JsonProperty("diskGb") double diskGb, @JsonProperty("bandwidthGbps") Double bandwidthGbps) { this.count = (int) requireNonNegative("count", count); this.vcpu = requireNonNegative("vcpu", vcpu); this.memoryGb = requireNonNegative("memoryGb", memoryGb); this.diskGb = requireNonNegative("diskGb", diskGb); this.bandwidthGbps = bandwidthGbps == null ? OptionalDouble.empty() : OptionalDouble.of(bandwidthGbps); } /** Returns a new ClusterCapacity equal to {@code this}, but with the given count. */ public ClusterCapacity withCount(int count) { return new ClusterCapacity(count, vcpu, memoryGb, diskGb, bandwidthGbpsOrNull()); } @JsonGetter("count") public int count() { return count; } @JsonGetter("vcpu") public double vcpu() { return vcpu; } @JsonGetter("memoryGb") public double memoryGb() { return memoryGb; } @JsonGetter("diskGb") public double diskGb() { return diskGb; } @JsonGetter("bandwidthGbps") public Double bandwidthGbpsOrNull() { return bandwidthGbps.isPresent() ? bandwidthGbps.getAsDouble() : null; } @JsonIgnore public double bandwidthGbps() { return bandwidthGbps.orElse(1.0); } @Override public String toString() { return "ClusterCapacity{" + "count=" + count + ", vcpu=" + vcpu + ", memoryGb=" + memoryGb + ", diskGb=" + diskGb + ", bandwidthGbps=" + bandwidthGbps + '}'; } @Override @Override public int hashCode() { return Objects.hash(count, vcpu, memoryGb, diskGb, bandwidthGbps); } private static double requireNonNegative(String name, double value) { if (value < 0) throw new IllegalArgumentException("'" + name + "' must be positive, was " + value); return value; } }
Is the parent guaranteed to be this type and non-null?
public void getConfig(ZookeeperServerConfig.Builder builder) { ((ApplicationContainerCluster)parent).getConfig(builder); builder.myid(index()); }
((ApplicationContainerCluster)parent).getConfig(builder);
public void getConfig(ZookeeperServerConfig.Builder builder) { AbstractConfigProducer<?> parent = getParent(); if (parent == null) return; if (parent instanceof ApplicationContainerCluster) ((ApplicationContainerCluster) this.parent).getConfig(builder); builder.myid(index()); }
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, boolean isHostedVespa) { this(parent, name, false, index, isHostedVespa); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, boolean isHostedVespa) { super(parent, name, retired, index, isHostedVespa); this.isHostedVespa = isHostedVespa; addComponent(getFS4ResourcePool()); } private static Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override public String getJvmOptions() { String jvmArgs = super.getJvmOptions(); return isHostedVespa && hasDocproc() ? ("".equals(jvmArgs) ? 
defaultHostedJVMArgs : defaultHostedJVMArgs + " " + jvmArgs) : jvmArgs; } private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override }
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, boolean isHostedVespa) { this(parent, name, false, index, isHostedVespa); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, boolean isHostedVespa) { super(parent, name, retired, index, isHostedVespa); this.isHostedVespa = isHostedVespa; addComponent(getFS4ResourcePool()); } private static Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override public String getJvmOptions() { String jvmArgs = super.getJvmOptions(); return isHostedVespa && hasDocproc() ? ("".equals(jvmArgs) ? 
defaultHostedJVMArgs : defaultHostedJVMArgs + " " + jvmArgs) : jvmArgs; } private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override }
Well, better to check anyway, I'll fix.
public void getConfig(ZookeeperServerConfig.Builder builder) { ((ApplicationContainerCluster)parent).getConfig(builder); builder.myid(index()); }
((ApplicationContainerCluster)parent).getConfig(builder);
public void getConfig(ZookeeperServerConfig.Builder builder) { AbstractConfigProducer<?> parent = getParent(); if (parent == null) return; if (parent instanceof ApplicationContainerCluster) ((ApplicationContainerCluster) this.parent).getConfig(builder); builder.myid(index()); }
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, boolean isHostedVespa) { this(parent, name, false, index, isHostedVespa); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, boolean isHostedVespa) { super(parent, name, retired, index, isHostedVespa); this.isHostedVespa = isHostedVespa; addComponent(getFS4ResourcePool()); } private static Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override public String getJvmOptions() { String jvmArgs = super.getJvmOptions(); return isHostedVespa && hasDocproc() ? ("".equals(jvmArgs) ? 
defaultHostedJVMArgs : defaultHostedJVMArgs + " " + jvmArgs) : jvmArgs; } private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override }
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, boolean isHostedVespa) { this(parent, name, false, index, isHostedVespa); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, boolean isHostedVespa) { super(parent, name, retired, index, isHostedVespa); this.isHostedVespa = isHostedVespa; addComponent(getFS4ResourcePool()); } private static Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override public String getJvmOptions() { String jvmArgs = super.getJvmOptions(); return isHostedVespa && hasDocproc() ? ("".equals(jvmArgs) ? 
defaultHostedJVMArgs : defaultHostedJVMArgs + " " + jvmArgs) : jvmArgs; } private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override }
null is passed when connection is suspended, lost, or on (re)connected, or when cache is initialized(?) So I think the correct thing to do is to duplicate the current statement to within each `case` in the below switch (and document getData may be null on some other event types)
private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { ChildData data = event.getData(); if (data == null) return; ApplicationId applicationId = ApplicationId.fromSerializedForm(Path.fromString(data.getPath()).getName()); switch (event.getType()) { case CHILD_ADDED: /* A new application is added when a session is added, @see {@link com.yahoo.vespa.config.server.session.SessionRepository log.log(Level.FINE, TenantRepository.logPre(applicationId) + "Application added: " + applicationId); break; case CHILD_REMOVED: removeApplication(applicationId); break; case CHILD_UPDATED: break; default: break; } removeUnusedApplications(); }); }
if (data == null) return;
private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { switch (event.getType()) { case CHILD_ADDED: /* A new application is added when a session is added, @see {@link com.yahoo.vespa.config.server.session.SessionRepository ApplicationId applicationId = ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()); log.log(Level.FINE, TenantRepository.logPre(applicationId) + "Application added: " + applicationId); break; case CHILD_REMOVED: removeApplication(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeUnusedApplications(); }); }
class TenantApplications implements RequestHandler, HostValidator<ApplicationId> { private static final Logger log = Logger.getLogger(TenantApplications.class.getName()); private final ApplicationCuratorDatabase database; private final Curator.DirectoryCache directoryCache; private final Executor zkWatcherExecutor; private final Metrics metrics; private final TenantName tenant; private final ReloadListener reloadListener; private final ConfigResponseFactory responseFactory; private final HostRegistry<ApplicationId> hostRegistry; private final ApplicationMapper applicationMapper = new ApplicationMapper(); private final MetricUpdater tenantMetricUpdater; private final Clock clock; private final TenantFileSystemDirs tenantFileSystemDirs; public TenantApplications(TenantName tenant, Curator curator, StripedExecutor<TenantName> zkWatcherExecutor, ExecutorService zkCacheExecutor, Metrics metrics, ReloadListener reloadListener, ConfigserverConfig configserverConfig, HostRegistry<ApplicationId> hostRegistry, TenantFileSystemDirs tenantFileSystemDirs, Clock clock) { this.database = new ApplicationCuratorDatabase(tenant, curator); this.tenant = tenant; this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenant, command); this.directoryCache = database.createApplicationsPathCache(zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); this.metrics = metrics; this.reloadListener = reloadListener; this.responseFactory = ConfigResponseFactory.create(configserverConfig); this.tenantMetricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenant)); this.hostRegistry = hostRegistry; this.tenantFileSystemDirs = tenantFileSystemDirs; this.clock = clock; } public static TenantApplications create(GlobalComponentRegistry componentRegistry, TenantName tenantName) { return new TenantApplications(tenantName, componentRegistry.getCurator(), componentRegistry.getZkWatcherExecutor(), componentRegistry.getZkCacheExecutor(), 
componentRegistry.getMetrics(), componentRegistry.getReloadListener(), componentRegistry.getConfigserverConfig(), componentRegistry.getHostRegistries().createApplicationHostRegistry(tenantName), new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName), componentRegistry.getClock()); } /** The curator backed ZK storage of this. */ public ApplicationCuratorDatabase database() { return database; } /** * List the active applications of a tenant in this config server. * * @return a list of {@link ApplicationId}s that are active. */ public List<ApplicationId> activeApplications() { return database().activeApplications(); } public boolean exists(ApplicationId id) { return database().exists(id); } /** * Returns the active session id for the given application. * Returns Optional.empty if application not found or no active session exists. */ public Optional<Long> activeSessionOf(ApplicationId id) { return database().activeSessionOf(id); } public boolean sessionExistsInFileSystem(long sessionId) { return Files.exists(Paths.get(tenantFileSystemDirs.sessionsPath().getAbsolutePath(), String.valueOf(sessionId))); } /** * Returns a transaction which writes the given session id as the currently active for the given application. * * @param applicationId An {@link ApplicationId} that represents an active application. * @param sessionId Id of the session containing the application package for this id. */ public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) { return database().createPutTransaction(applicationId, sessionId); } /** * Creates a node for the given application, marking its existence. */ public void createApplication(ApplicationId id) { database().createApplication(id, clock.instant()); } /** * Return the active session id for a given application. * * @param applicationId an {@link ApplicationId} * @return session id of given application id. 
* @throws IllegalArgumentException if the application does not exist */ public long requireActiveSessionOf(ApplicationId applicationId) { return activeSessionOf(applicationId) .orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session.")); } /** * Returns a transaction which deletes this application. */ public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) { return database().createDeleteTransaction(applicationId); } /** * Removes all applications not known to this from the config server state. */ public void removeUnusedApplications() { removeApplicationsExcept(Set.copyOf(activeApplications())); } /** * Closes the application repo. Once a repo has been closed, it should not be used again. */ public void close() { directoryCache.close(); } /** Returns the lock for changing the session status of the given application. */ public Lock lock(ApplicationId id) { return database().lock(id); } /** * Gets a config for the given app, or null if not found */ @Override public ConfigResponse resolveConfig(ApplicationId appId, GetConfigRequest req, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, TenantRepository.logPre(appId) + "Resolving for tenant '" + tenant + "' with handler for application '" + application + "'"); } return application.resolveConfig(req, responseFactory); } private void notifyReloadListeners(ApplicationSet applicationSet) { reloadListener.hostsUpdated(tenant, hostRegistry.getAllHosts()); reloadListener.configActivated(applicationSet); } /** * Activates the config of the given app. Notifies listeners * * @param applicationSet the {@link ApplicationSet} to be reloaded */ public void activateApplication(ApplicationSet applicationSet, long activeSessionId) { ApplicationId id = applicationSet.getId(); try (Lock lock = lock(id)) { if ( ! 
exists(id)) return; if (applicationSet.getApplicationGeneration() != activeSessionId) return; setLiveApp(applicationSet); notifyReloadListeners(applicationSet); } } public void removeApplication(ApplicationId applicationId) { try (Lock lock = lock(applicationId)) { if (exists(applicationId)) { log.log(Level.INFO, "Tried removing application " + applicationId + ", but it seems to have been deployed again"); return; } if (applicationMapper.hasApplication(applicationId, clock.instant())) { applicationMapper.remove(applicationId); hostRegistry.removeHostsForKey(applicationId); reloadListenersOnRemove(applicationId); tenantMetricUpdater.setApplications(applicationMapper.numApplications()); metrics.removeMetricUpdater(Metrics.createDimensions(applicationId)); log.log(Level.INFO, "Application removed: " + applicationId); } } } public void removeApplicationsExcept(Set<ApplicationId> applications) { for (ApplicationId activeApplication : applicationMapper.listApplicationIds()) { if ( ! applications.contains(activeApplication)) { removeApplication(activeApplication); } } } private void reloadListenersOnRemove(ApplicationId applicationId) { reloadListener.hostsUpdated(tenant, hostRegistry.getAllHosts()); reloadListener.applicationRemoved(applicationId); } private void setLiveApp(ApplicationSet applicationSet) { ApplicationId id = applicationSet.getId(); Collection<String> hostsForApp = applicationSet.getAllHosts(); hostRegistry.update(id, hostsForApp); applicationSet.updateHostMetrics(); tenantMetricUpdater.setApplications(applicationMapper.numApplications()); applicationMapper.register(id, applicationSet); } @Override public Set<ConfigKey<?>> listNamedConfigs(ApplicationId appId, Optional<Version> vespaVersion, ConfigKey<?> keyToMatch, boolean recursive) { Application application = getApplication(appId, vespaVersion); return listConfigs(application, keyToMatch, recursive); } private Set<ConfigKey<?>> listConfigs(Application application, ConfigKey<?> keyToMatch, boolean 
recursive) { Set<ConfigKey<?>> ret = new LinkedHashSet<>(); for (ConfigKey<?> key : application.allConfigsProduced()) { String configId = key.getConfigId(); if (recursive) { key = new ConfigKey<>(key.getName(), configId, key.getNamespace()); } else { key = new ConfigKey<>(key.getName(), configId.split("/")[0], key.getNamespace()); } if (keyToMatch != null) { String n = key.getName(); String ns = key.getNamespace(); if (n.equals(keyToMatch.getName()) && ns.equals(keyToMatch.getNamespace()) && configId.startsWith(keyToMatch.getConfigId()) && !(configId.equals(keyToMatch.getConfigId()))) { if (!recursive) { key = new ConfigKey<>(key.getName(), appendOneLevelOfId(keyToMatch.getConfigId(), configId), key.getNamespace()); } ret.add(key); } } else { ret.add(key); } } return ret; } @Override public Set<ConfigKey<?>> listConfigs(ApplicationId appId, Optional<Version> vespaVersion, boolean recursive) { Application application = getApplication(appId, vespaVersion); return listConfigs(application, null, recursive); } /** * Given baseIdSegment search/ and id search/qrservers/default.0, return search/qrservers * @return id segment with one extra level from the id appended */ String appendOneLevelOfId(String baseIdSegment, String id) { if ("".equals(baseIdSegment)) return id.split("/")[0]; String theRest = id.substring(baseIdSegment.length()); if ("".equals(theRest)) return id; theRest = theRest.replaceFirst("/", ""); String theRestFirstSeg = theRest.split("/")[0]; return baseIdSegment+"/"+theRestFirstSeg; } @Override public Set<ConfigKey<?>> allConfigsProduced(ApplicationId appId, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); return application.allConfigsProduced(); } private Application getApplication(ApplicationId appId, Optional<Version> vespaVersion) { try { return applicationMapper.getForVersion(appId, vespaVersion, clock.instant()); } catch (VersionDoesNotExistException ex) { throw new NotFoundException(String.format("%sNo 
such application (id %s): %s", TenantRepository.logPre(tenant), appId, ex.getMessage())); } } @Override public Set<String> allConfigIds(ApplicationId appId, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); return application.allConfigIds(); } @Override public boolean hasApplication(ApplicationId appId, Optional<Version> vespaVersion) { return hasHandler(appId, vespaVersion); } private boolean hasHandler(ApplicationId appId, Optional<Version> vespaVersion) { return applicationMapper.hasApplicationForVersion(appId, vespaVersion, clock.instant()); } @Override public ApplicationId resolveApplicationId(String hostName) { return hostRegistry.getKeyForHost(hostName); } @Override public Set<FileReference> listFileReferences(ApplicationId applicationId) { return applicationMapper.listApplications(applicationId).stream() .flatMap(app -> app.getModel().fileReferences().stream()) .collect(toSet()); } @Override public void verifyHosts(ApplicationId key, Collection<String> newHosts) { hostRegistry.verifyHosts(key, newHosts); reloadListener.verifyHostsAreAvailable(tenant, newHosts); } public HostValidator<ApplicationId> getHostValidator() { return this; } public ApplicationId getApplicationIdForHostName(String hostname) { return hostRegistry.getKeyForHost(hostname); } public TenantFileSystemDirs getTenantFileSystemDirs() { return tenantFileSystemDirs; } }
class TenantApplications implements RequestHandler, HostValidator<ApplicationId> { private static final Logger log = Logger.getLogger(TenantApplications.class.getName()); private final ApplicationCuratorDatabase database; private final Curator.DirectoryCache directoryCache; private final Executor zkWatcherExecutor; private final Metrics metrics; private final TenantName tenant; private final ReloadListener reloadListener; private final ConfigResponseFactory responseFactory; private final HostRegistry<ApplicationId> hostRegistry; private final ApplicationMapper applicationMapper = new ApplicationMapper(); private final MetricUpdater tenantMetricUpdater; private final Clock clock; private final TenantFileSystemDirs tenantFileSystemDirs; public TenantApplications(TenantName tenant, Curator curator, StripedExecutor<TenantName> zkWatcherExecutor, ExecutorService zkCacheExecutor, Metrics metrics, ReloadListener reloadListener, ConfigserverConfig configserverConfig, HostRegistry<ApplicationId> hostRegistry, TenantFileSystemDirs tenantFileSystemDirs, Clock clock) { this.database = new ApplicationCuratorDatabase(tenant, curator); this.tenant = tenant; this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenant, command); this.directoryCache = database.createApplicationsPathCache(zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); this.metrics = metrics; this.reloadListener = reloadListener; this.responseFactory = ConfigResponseFactory.create(configserverConfig); this.tenantMetricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenant)); this.hostRegistry = hostRegistry; this.tenantFileSystemDirs = tenantFileSystemDirs; this.clock = clock; } public static TenantApplications create(GlobalComponentRegistry componentRegistry, TenantName tenantName) { return new TenantApplications(tenantName, componentRegistry.getCurator(), componentRegistry.getZkWatcherExecutor(), componentRegistry.getZkCacheExecutor(), 
componentRegistry.getMetrics(), componentRegistry.getReloadListener(), componentRegistry.getConfigserverConfig(), componentRegistry.getHostRegistries().createApplicationHostRegistry(tenantName), new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName), componentRegistry.getClock()); } /** The curator backed ZK storage of this. */ public ApplicationCuratorDatabase database() { return database; } /** * List the active applications of a tenant in this config server. * * @return a list of {@link ApplicationId}s that are active. */ public List<ApplicationId> activeApplications() { return database().activeApplications(); } public boolean exists(ApplicationId id) { return database().exists(id); } /** * Returns the active session id for the given application. * Returns Optional.empty if application not found or no active session exists. */ public Optional<Long> activeSessionOf(ApplicationId id) { return database().activeSessionOf(id); } public boolean sessionExistsInFileSystem(long sessionId) { return Files.exists(Paths.get(tenantFileSystemDirs.sessionsPath().getAbsolutePath(), String.valueOf(sessionId))); } /** * Returns a transaction which writes the given session id as the currently active for the given application. * * @param applicationId An {@link ApplicationId} that represents an active application. * @param sessionId Id of the session containing the application package for this id. */ public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) { return database().createPutTransaction(applicationId, sessionId); } /** * Creates a node for the given application, marking its existence. */ public void createApplication(ApplicationId id) { database().createApplication(id, clock.instant()); } /** * Return the active session id for a given application. * * @param applicationId an {@link ApplicationId} * @return session id of given application id. 
* @throws IllegalArgumentException if the application does not exist */ public long requireActiveSessionOf(ApplicationId applicationId) { return activeSessionOf(applicationId) .orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session.")); } /** * Returns a transaction which deletes this application. */ public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) { return database().createDeleteTransaction(applicationId); } /** * Removes all applications not known to this from the config server state. */ public void removeUnusedApplications() { removeApplicationsExcept(Set.copyOf(activeApplications())); } /** * Closes the application repo. Once a repo has been closed, it should not be used again. */ public void close() { directoryCache.close(); } /** Returns the lock for changing the session status of the given application. */ public Lock lock(ApplicationId id) { return database().lock(id); } /** * Gets a config for the given app, or null if not found */ @Override public ConfigResponse resolveConfig(ApplicationId appId, GetConfigRequest req, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, TenantRepository.logPre(appId) + "Resolving for tenant '" + tenant + "' with handler for application '" + application + "'"); } return application.resolveConfig(req, responseFactory); } private void notifyReloadListeners(ApplicationSet applicationSet) { reloadListener.hostsUpdated(tenant, hostRegistry.getAllHosts()); reloadListener.configActivated(applicationSet); } /** * Activates the config of the given app. Notifies listeners * * @param applicationSet the {@link ApplicationSet} to be reloaded */ public void activateApplication(ApplicationSet applicationSet, long activeSessionId) { ApplicationId id = applicationSet.getId(); try (Lock lock = lock(id)) { if ( ! 
exists(id)) return; if (applicationSet.getApplicationGeneration() != activeSessionId) return; setLiveApp(applicationSet); notifyReloadListeners(applicationSet); } } public void removeApplication(ApplicationId applicationId) { try (Lock lock = lock(applicationId)) { if (exists(applicationId)) { log.log(Level.INFO, "Tried removing application " + applicationId + ", but it seems to have been deployed again"); return; } if (applicationMapper.hasApplication(applicationId, clock.instant())) { applicationMapper.remove(applicationId); hostRegistry.removeHostsForKey(applicationId); reloadListenersOnRemove(applicationId); tenantMetricUpdater.setApplications(applicationMapper.numApplications()); metrics.removeMetricUpdater(Metrics.createDimensions(applicationId)); log.log(Level.INFO, "Application removed: " + applicationId); } } } public void removeApplicationsExcept(Set<ApplicationId> applications) { for (ApplicationId activeApplication : applicationMapper.listApplicationIds()) { if ( ! applications.contains(activeApplication)) { removeApplication(activeApplication); } } } private void reloadListenersOnRemove(ApplicationId applicationId) { reloadListener.hostsUpdated(tenant, hostRegistry.getAllHosts()); reloadListener.applicationRemoved(applicationId); } private void setLiveApp(ApplicationSet applicationSet) { ApplicationId id = applicationSet.getId(); Collection<String> hostsForApp = applicationSet.getAllHosts(); hostRegistry.update(id, hostsForApp); applicationSet.updateHostMetrics(); tenantMetricUpdater.setApplications(applicationMapper.numApplications()); applicationMapper.register(id, applicationSet); } @Override public Set<ConfigKey<?>> listNamedConfigs(ApplicationId appId, Optional<Version> vespaVersion, ConfigKey<?> keyToMatch, boolean recursive) { Application application = getApplication(appId, vespaVersion); return listConfigs(application, keyToMatch, recursive); } private Set<ConfigKey<?>> listConfigs(Application application, ConfigKey<?> keyToMatch, boolean 
recursive) { Set<ConfigKey<?>> ret = new LinkedHashSet<>(); for (ConfigKey<?> key : application.allConfigsProduced()) { String configId = key.getConfigId(); if (recursive) { key = new ConfigKey<>(key.getName(), configId, key.getNamespace()); } else { key = new ConfigKey<>(key.getName(), configId.split("/")[0], key.getNamespace()); } if (keyToMatch != null) { String n = key.getName(); String ns = key.getNamespace(); if (n.equals(keyToMatch.getName()) && ns.equals(keyToMatch.getNamespace()) && configId.startsWith(keyToMatch.getConfigId()) && !(configId.equals(keyToMatch.getConfigId()))) { if (!recursive) { key = new ConfigKey<>(key.getName(), appendOneLevelOfId(keyToMatch.getConfigId(), configId), key.getNamespace()); } ret.add(key); } } else { ret.add(key); } } return ret; } @Override public Set<ConfigKey<?>> listConfigs(ApplicationId appId, Optional<Version> vespaVersion, boolean recursive) { Application application = getApplication(appId, vespaVersion); return listConfigs(application, null, recursive); } /** * Given baseIdSegment search/ and id search/qrservers/default.0, return search/qrservers * @return id segment with one extra level from the id appended */ String appendOneLevelOfId(String baseIdSegment, String id) { if ("".equals(baseIdSegment)) return id.split("/")[0]; String theRest = id.substring(baseIdSegment.length()); if ("".equals(theRest)) return id; theRest = theRest.replaceFirst("/", ""); String theRestFirstSeg = theRest.split("/")[0]; return baseIdSegment+"/"+theRestFirstSeg; } @Override public Set<ConfigKey<?>> allConfigsProduced(ApplicationId appId, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); return application.allConfigsProduced(); } private Application getApplication(ApplicationId appId, Optional<Version> vespaVersion) { try { return applicationMapper.getForVersion(appId, vespaVersion, clock.instant()); } catch (VersionDoesNotExistException ex) { throw new NotFoundException(String.format("%sNo 
such application (id %s): %s", TenantRepository.logPre(tenant), appId, ex.getMessage())); } } @Override public Set<String> allConfigIds(ApplicationId appId, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); return application.allConfigIds(); } @Override public boolean hasApplication(ApplicationId appId, Optional<Version> vespaVersion) { return hasHandler(appId, vespaVersion); } private boolean hasHandler(ApplicationId appId, Optional<Version> vespaVersion) { return applicationMapper.hasApplicationForVersion(appId, vespaVersion, clock.instant()); } @Override public ApplicationId resolveApplicationId(String hostName) { return hostRegistry.getKeyForHost(hostName); } @Override public Set<FileReference> listFileReferences(ApplicationId applicationId) { return applicationMapper.listApplications(applicationId).stream() .flatMap(app -> app.getModel().fileReferences().stream()) .collect(toSet()); } @Override public void verifyHosts(ApplicationId key, Collection<String> newHosts) { hostRegistry.verifyHosts(key, newHosts); reloadListener.verifyHostsAreAvailable(tenant, newHosts); } public HostValidator<ApplicationId> getHostValidator() { return this; } public ApplicationId getApplicationIdForHostName(String hostname) { return hostRegistry.getKeyForHost(hostname); } public TenantFileSystemDirs getTenantFileSystemDirs() { return tenantFileSystemDirs; } }
Right, thanks, will do as suggested
private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { ChildData data = event.getData(); if (data == null) return; ApplicationId applicationId = ApplicationId.fromSerializedForm(Path.fromString(data.getPath()).getName()); switch (event.getType()) { case CHILD_ADDED: /* A new application is added when a session is added, @see {@link com.yahoo.vespa.config.server.session.SessionRepository log.log(Level.FINE, TenantRepository.logPre(applicationId) + "Application added: " + applicationId); break; case CHILD_REMOVED: removeApplication(applicationId); break; case CHILD_UPDATED: break; default: break; } removeUnusedApplications(); }); }
if (data == null) return;
private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { switch (event.getType()) { case CHILD_ADDED: /* A new application is added when a session is added, @see {@link com.yahoo.vespa.config.server.session.SessionRepository ApplicationId applicationId = ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()); log.log(Level.FINE, TenantRepository.logPre(applicationId) + "Application added: " + applicationId); break; case CHILD_REMOVED: removeApplication(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeUnusedApplications(); }); }
class TenantApplications implements RequestHandler, HostValidator<ApplicationId> { private static final Logger log = Logger.getLogger(TenantApplications.class.getName()); private final ApplicationCuratorDatabase database; private final Curator.DirectoryCache directoryCache; private final Executor zkWatcherExecutor; private final Metrics metrics; private final TenantName tenant; private final ReloadListener reloadListener; private final ConfigResponseFactory responseFactory; private final HostRegistry<ApplicationId> hostRegistry; private final ApplicationMapper applicationMapper = new ApplicationMapper(); private final MetricUpdater tenantMetricUpdater; private final Clock clock; private final TenantFileSystemDirs tenantFileSystemDirs; public TenantApplications(TenantName tenant, Curator curator, StripedExecutor<TenantName> zkWatcherExecutor, ExecutorService zkCacheExecutor, Metrics metrics, ReloadListener reloadListener, ConfigserverConfig configserverConfig, HostRegistry<ApplicationId> hostRegistry, TenantFileSystemDirs tenantFileSystemDirs, Clock clock) { this.database = new ApplicationCuratorDatabase(tenant, curator); this.tenant = tenant; this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenant, command); this.directoryCache = database.createApplicationsPathCache(zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); this.metrics = metrics; this.reloadListener = reloadListener; this.responseFactory = ConfigResponseFactory.create(configserverConfig); this.tenantMetricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenant)); this.hostRegistry = hostRegistry; this.tenantFileSystemDirs = tenantFileSystemDirs; this.clock = clock; } public static TenantApplications create(GlobalComponentRegistry componentRegistry, TenantName tenantName) { return new TenantApplications(tenantName, componentRegistry.getCurator(), componentRegistry.getZkWatcherExecutor(), componentRegistry.getZkCacheExecutor(), 
componentRegistry.getMetrics(), componentRegistry.getReloadListener(), componentRegistry.getConfigserverConfig(), componentRegistry.getHostRegistries().createApplicationHostRegistry(tenantName), new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName), componentRegistry.getClock()); } /** The curator backed ZK storage of this. */ public ApplicationCuratorDatabase database() { return database; } /** * List the active applications of a tenant in this config server. * * @return a list of {@link ApplicationId}s that are active. */ public List<ApplicationId> activeApplications() { return database().activeApplications(); } public boolean exists(ApplicationId id) { return database().exists(id); } /** * Returns the active session id for the given application. * Returns Optional.empty if application not found or no active session exists. */ public Optional<Long> activeSessionOf(ApplicationId id) { return database().activeSessionOf(id); } public boolean sessionExistsInFileSystem(long sessionId) { return Files.exists(Paths.get(tenantFileSystemDirs.sessionsPath().getAbsolutePath(), String.valueOf(sessionId))); } /** * Returns a transaction which writes the given session id as the currently active for the given application. * * @param applicationId An {@link ApplicationId} that represents an active application. * @param sessionId Id of the session containing the application package for this id. */ public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) { return database().createPutTransaction(applicationId, sessionId); } /** * Creates a node for the given application, marking its existence. */ public void createApplication(ApplicationId id) { database().createApplication(id, clock.instant()); } /** * Return the active session id for a given application. * * @param applicationId an {@link ApplicationId} * @return session id of given application id. 
* @throws IllegalArgumentException if the application does not exist */ public long requireActiveSessionOf(ApplicationId applicationId) { return activeSessionOf(applicationId) .orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session.")); } /** * Returns a transaction which deletes this application. */ public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) { return database().createDeleteTransaction(applicationId); } /** * Removes all applications not known to this from the config server state. */ public void removeUnusedApplications() { removeApplicationsExcept(Set.copyOf(activeApplications())); } /** * Closes the application repo. Once a repo has been closed, it should not be used again. */ public void close() { directoryCache.close(); } /** Returns the lock for changing the session status of the given application. */ public Lock lock(ApplicationId id) { return database().lock(id); } /** * Gets a config for the given app, or null if not found */ @Override public ConfigResponse resolveConfig(ApplicationId appId, GetConfigRequest req, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, TenantRepository.logPre(appId) + "Resolving for tenant '" + tenant + "' with handler for application '" + application + "'"); } return application.resolveConfig(req, responseFactory); } private void notifyReloadListeners(ApplicationSet applicationSet) { reloadListener.hostsUpdated(tenant, hostRegistry.getAllHosts()); reloadListener.configActivated(applicationSet); } /** * Activates the config of the given app. Notifies listeners * * @param applicationSet the {@link ApplicationSet} to be reloaded */ public void activateApplication(ApplicationSet applicationSet, long activeSessionId) { ApplicationId id = applicationSet.getId(); try (Lock lock = lock(id)) { if ( ! 
exists(id)) return; if (applicationSet.getApplicationGeneration() != activeSessionId) return; setLiveApp(applicationSet); notifyReloadListeners(applicationSet); } } public void removeApplication(ApplicationId applicationId) { try (Lock lock = lock(applicationId)) { if (exists(applicationId)) { log.log(Level.INFO, "Tried removing application " + applicationId + ", but it seems to have been deployed again"); return; } if (applicationMapper.hasApplication(applicationId, clock.instant())) { applicationMapper.remove(applicationId); hostRegistry.removeHostsForKey(applicationId); reloadListenersOnRemove(applicationId); tenantMetricUpdater.setApplications(applicationMapper.numApplications()); metrics.removeMetricUpdater(Metrics.createDimensions(applicationId)); log.log(Level.INFO, "Application removed: " + applicationId); } } } public void removeApplicationsExcept(Set<ApplicationId> applications) { for (ApplicationId activeApplication : applicationMapper.listApplicationIds()) { if ( ! applications.contains(activeApplication)) { removeApplication(activeApplication); } } } private void reloadListenersOnRemove(ApplicationId applicationId) { reloadListener.hostsUpdated(tenant, hostRegistry.getAllHosts()); reloadListener.applicationRemoved(applicationId); } private void setLiveApp(ApplicationSet applicationSet) { ApplicationId id = applicationSet.getId(); Collection<String> hostsForApp = applicationSet.getAllHosts(); hostRegistry.update(id, hostsForApp); applicationSet.updateHostMetrics(); tenantMetricUpdater.setApplications(applicationMapper.numApplications()); applicationMapper.register(id, applicationSet); } @Override public Set<ConfigKey<?>> listNamedConfigs(ApplicationId appId, Optional<Version> vespaVersion, ConfigKey<?> keyToMatch, boolean recursive) { Application application = getApplication(appId, vespaVersion); return listConfigs(application, keyToMatch, recursive); } private Set<ConfigKey<?>> listConfigs(Application application, ConfigKey<?> keyToMatch, boolean 
recursive) { Set<ConfigKey<?>> ret = new LinkedHashSet<>(); for (ConfigKey<?> key : application.allConfigsProduced()) { String configId = key.getConfigId(); if (recursive) { key = new ConfigKey<>(key.getName(), configId, key.getNamespace()); } else { key = new ConfigKey<>(key.getName(), configId.split("/")[0], key.getNamespace()); } if (keyToMatch != null) { String n = key.getName(); String ns = key.getNamespace(); if (n.equals(keyToMatch.getName()) && ns.equals(keyToMatch.getNamespace()) && configId.startsWith(keyToMatch.getConfigId()) && !(configId.equals(keyToMatch.getConfigId()))) { if (!recursive) { key = new ConfigKey<>(key.getName(), appendOneLevelOfId(keyToMatch.getConfigId(), configId), key.getNamespace()); } ret.add(key); } } else { ret.add(key); } } return ret; } @Override public Set<ConfigKey<?>> listConfigs(ApplicationId appId, Optional<Version> vespaVersion, boolean recursive) { Application application = getApplication(appId, vespaVersion); return listConfigs(application, null, recursive); } /** * Given baseIdSegment search/ and id search/qrservers/default.0, return search/qrservers * @return id segment with one extra level from the id appended */ String appendOneLevelOfId(String baseIdSegment, String id) { if ("".equals(baseIdSegment)) return id.split("/")[0]; String theRest = id.substring(baseIdSegment.length()); if ("".equals(theRest)) return id; theRest = theRest.replaceFirst("/", ""); String theRestFirstSeg = theRest.split("/")[0]; return baseIdSegment+"/"+theRestFirstSeg; } @Override public Set<ConfigKey<?>> allConfigsProduced(ApplicationId appId, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); return application.allConfigsProduced(); } private Application getApplication(ApplicationId appId, Optional<Version> vespaVersion) { try { return applicationMapper.getForVersion(appId, vespaVersion, clock.instant()); } catch (VersionDoesNotExistException ex) { throw new NotFoundException(String.format("%sNo 
such application (id %s): %s", TenantRepository.logPre(tenant), appId, ex.getMessage())); } } @Override public Set<String> allConfigIds(ApplicationId appId, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); return application.allConfigIds(); } @Override public boolean hasApplication(ApplicationId appId, Optional<Version> vespaVersion) { return hasHandler(appId, vespaVersion); } private boolean hasHandler(ApplicationId appId, Optional<Version> vespaVersion) { return applicationMapper.hasApplicationForVersion(appId, vespaVersion, clock.instant()); } @Override public ApplicationId resolveApplicationId(String hostName) { return hostRegistry.getKeyForHost(hostName); } @Override public Set<FileReference> listFileReferences(ApplicationId applicationId) { return applicationMapper.listApplications(applicationId).stream() .flatMap(app -> app.getModel().fileReferences().stream()) .collect(toSet()); } @Override public void verifyHosts(ApplicationId key, Collection<String> newHosts) { hostRegistry.verifyHosts(key, newHosts); reloadListener.verifyHostsAreAvailable(tenant, newHosts); } public HostValidator<ApplicationId> getHostValidator() { return this; } public ApplicationId getApplicationIdForHostName(String hostname) { return hostRegistry.getKeyForHost(hostname); } public TenantFileSystemDirs getTenantFileSystemDirs() { return tenantFileSystemDirs; } }
class TenantApplications implements RequestHandler, HostValidator<ApplicationId> { private static final Logger log = Logger.getLogger(TenantApplications.class.getName()); private final ApplicationCuratorDatabase database; private final Curator.DirectoryCache directoryCache; private final Executor zkWatcherExecutor; private final Metrics metrics; private final TenantName tenant; private final ReloadListener reloadListener; private final ConfigResponseFactory responseFactory; private final HostRegistry<ApplicationId> hostRegistry; private final ApplicationMapper applicationMapper = new ApplicationMapper(); private final MetricUpdater tenantMetricUpdater; private final Clock clock; private final TenantFileSystemDirs tenantFileSystemDirs; public TenantApplications(TenantName tenant, Curator curator, StripedExecutor<TenantName> zkWatcherExecutor, ExecutorService zkCacheExecutor, Metrics metrics, ReloadListener reloadListener, ConfigserverConfig configserverConfig, HostRegistry<ApplicationId> hostRegistry, TenantFileSystemDirs tenantFileSystemDirs, Clock clock) { this.database = new ApplicationCuratorDatabase(tenant, curator); this.tenant = tenant; this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenant, command); this.directoryCache = database.createApplicationsPathCache(zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); this.metrics = metrics; this.reloadListener = reloadListener; this.responseFactory = ConfigResponseFactory.create(configserverConfig); this.tenantMetricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenant)); this.hostRegistry = hostRegistry; this.tenantFileSystemDirs = tenantFileSystemDirs; this.clock = clock; } public static TenantApplications create(GlobalComponentRegistry componentRegistry, TenantName tenantName) { return new TenantApplications(tenantName, componentRegistry.getCurator(), componentRegistry.getZkWatcherExecutor(), componentRegistry.getZkCacheExecutor(), 
componentRegistry.getMetrics(), componentRegistry.getReloadListener(), componentRegistry.getConfigserverConfig(), componentRegistry.getHostRegistries().createApplicationHostRegistry(tenantName), new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName), componentRegistry.getClock()); } /** The curator backed ZK storage of this. */ public ApplicationCuratorDatabase database() { return database; } /** * List the active applications of a tenant in this config server. * * @return a list of {@link ApplicationId}s that are active. */ public List<ApplicationId> activeApplications() { return database().activeApplications(); } public boolean exists(ApplicationId id) { return database().exists(id); } /** * Returns the active session id for the given application. * Returns Optional.empty if application not found or no active session exists. */ public Optional<Long> activeSessionOf(ApplicationId id) { return database().activeSessionOf(id); } public boolean sessionExistsInFileSystem(long sessionId) { return Files.exists(Paths.get(tenantFileSystemDirs.sessionsPath().getAbsolutePath(), String.valueOf(sessionId))); } /** * Returns a transaction which writes the given session id as the currently active for the given application. * * @param applicationId An {@link ApplicationId} that represents an active application. * @param sessionId Id of the session containing the application package for this id. */ public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) { return database().createPutTransaction(applicationId, sessionId); } /** * Creates a node for the given application, marking its existence. */ public void createApplication(ApplicationId id) { database().createApplication(id, clock.instant()); } /** * Return the active session id for a given application. * * @param applicationId an {@link ApplicationId} * @return session id of given application id. 
* @throws IllegalArgumentException if the application does not exist */ public long requireActiveSessionOf(ApplicationId applicationId) { return activeSessionOf(applicationId) .orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session.")); } /** * Returns a transaction which deletes this application. */ public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) { return database().createDeleteTransaction(applicationId); } /** * Removes all applications not known to this from the config server state. */ public void removeUnusedApplications() { removeApplicationsExcept(Set.copyOf(activeApplications())); } /** * Closes the application repo. Once a repo has been closed, it should not be used again. */ public void close() { directoryCache.close(); } /** Returns the lock for changing the session status of the given application. */ public Lock lock(ApplicationId id) { return database().lock(id); } /** * Gets a config for the given app, or null if not found */ @Override public ConfigResponse resolveConfig(ApplicationId appId, GetConfigRequest req, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, TenantRepository.logPre(appId) + "Resolving for tenant '" + tenant + "' with handler for application '" + application + "'"); } return application.resolveConfig(req, responseFactory); } private void notifyReloadListeners(ApplicationSet applicationSet) { reloadListener.hostsUpdated(tenant, hostRegistry.getAllHosts()); reloadListener.configActivated(applicationSet); } /** * Activates the config of the given app. Notifies listeners * * @param applicationSet the {@link ApplicationSet} to be reloaded */ public void activateApplication(ApplicationSet applicationSet, long activeSessionId) { ApplicationId id = applicationSet.getId(); try (Lock lock = lock(id)) { if ( ! 
exists(id)) return; if (applicationSet.getApplicationGeneration() != activeSessionId) return; setLiveApp(applicationSet); notifyReloadListeners(applicationSet); } } public void removeApplication(ApplicationId applicationId) { try (Lock lock = lock(applicationId)) { if (exists(applicationId)) { log.log(Level.INFO, "Tried removing application " + applicationId + ", but it seems to have been deployed again"); return; } if (applicationMapper.hasApplication(applicationId, clock.instant())) { applicationMapper.remove(applicationId); hostRegistry.removeHostsForKey(applicationId); reloadListenersOnRemove(applicationId); tenantMetricUpdater.setApplications(applicationMapper.numApplications()); metrics.removeMetricUpdater(Metrics.createDimensions(applicationId)); log.log(Level.INFO, "Application removed: " + applicationId); } } } public void removeApplicationsExcept(Set<ApplicationId> applications) { for (ApplicationId activeApplication : applicationMapper.listApplicationIds()) { if ( ! applications.contains(activeApplication)) { removeApplication(activeApplication); } } } private void reloadListenersOnRemove(ApplicationId applicationId) { reloadListener.hostsUpdated(tenant, hostRegistry.getAllHosts()); reloadListener.applicationRemoved(applicationId); } private void setLiveApp(ApplicationSet applicationSet) { ApplicationId id = applicationSet.getId(); Collection<String> hostsForApp = applicationSet.getAllHosts(); hostRegistry.update(id, hostsForApp); applicationSet.updateHostMetrics(); tenantMetricUpdater.setApplications(applicationMapper.numApplications()); applicationMapper.register(id, applicationSet); } @Override public Set<ConfigKey<?>> listNamedConfigs(ApplicationId appId, Optional<Version> vespaVersion, ConfigKey<?> keyToMatch, boolean recursive) { Application application = getApplication(appId, vespaVersion); return listConfigs(application, keyToMatch, recursive); } private Set<ConfigKey<?>> listConfigs(Application application, ConfigKey<?> keyToMatch, boolean 
recursive) { Set<ConfigKey<?>> ret = new LinkedHashSet<>(); for (ConfigKey<?> key : application.allConfigsProduced()) { String configId = key.getConfigId(); if (recursive) { key = new ConfigKey<>(key.getName(), configId, key.getNamespace()); } else { key = new ConfigKey<>(key.getName(), configId.split("/")[0], key.getNamespace()); } if (keyToMatch != null) { String n = key.getName(); String ns = key.getNamespace(); if (n.equals(keyToMatch.getName()) && ns.equals(keyToMatch.getNamespace()) && configId.startsWith(keyToMatch.getConfigId()) && !(configId.equals(keyToMatch.getConfigId()))) { if (!recursive) { key = new ConfigKey<>(key.getName(), appendOneLevelOfId(keyToMatch.getConfigId(), configId), key.getNamespace()); } ret.add(key); } } else { ret.add(key); } } return ret; } @Override public Set<ConfigKey<?>> listConfigs(ApplicationId appId, Optional<Version> vespaVersion, boolean recursive) { Application application = getApplication(appId, vespaVersion); return listConfigs(application, null, recursive); } /** * Given baseIdSegment search/ and id search/qrservers/default.0, return search/qrservers * @return id segment with one extra level from the id appended */ String appendOneLevelOfId(String baseIdSegment, String id) { if ("".equals(baseIdSegment)) return id.split("/")[0]; String theRest = id.substring(baseIdSegment.length()); if ("".equals(theRest)) return id; theRest = theRest.replaceFirst("/", ""); String theRestFirstSeg = theRest.split("/")[0]; return baseIdSegment+"/"+theRestFirstSeg; } @Override public Set<ConfigKey<?>> allConfigsProduced(ApplicationId appId, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); return application.allConfigsProduced(); } private Application getApplication(ApplicationId appId, Optional<Version> vespaVersion) { try { return applicationMapper.getForVersion(appId, vespaVersion, clock.instant()); } catch (VersionDoesNotExistException ex) { throw new NotFoundException(String.format("%sNo 
such application (id %s): %s", TenantRepository.logPre(tenant), appId, ex.getMessage())); } } @Override public Set<String> allConfigIds(ApplicationId appId, Optional<Version> vespaVersion) { Application application = getApplication(appId, vespaVersion); return application.allConfigIds(); } @Override public boolean hasApplication(ApplicationId appId, Optional<Version> vespaVersion) { return hasHandler(appId, vespaVersion); } private boolean hasHandler(ApplicationId appId, Optional<Version> vespaVersion) { return applicationMapper.hasApplicationForVersion(appId, vespaVersion, clock.instant()); } @Override public ApplicationId resolveApplicationId(String hostName) { return hostRegistry.getKeyForHost(hostName); } @Override public Set<FileReference> listFileReferences(ApplicationId applicationId) { return applicationMapper.listApplications(applicationId).stream() .flatMap(app -> app.getModel().fileReferences().stream()) .collect(toSet()); } @Override public void verifyHosts(ApplicationId key, Collection<String> newHosts) { hostRegistry.verifyHosts(key, newHosts); reloadListener.verifyHostsAreAvailable(tenant, newHosts); } public HostValidator<ApplicationId> getHostValidator() { return this; } public ApplicationId getApplicationIdForHostName(String hostname) { return hostRegistry.getKeyForHost(hostname); } public TenantFileSystemDirs getTenantFileSystemDirs() { return tenantFileSystemDirs; } }
Perhaps obsolete now?
public void getConfig(ZookeeperServerConfig.Builder builder) { AbstractConfigProducer<?> parent = getParent(); if (parent == null) return; builder.myid(index()); }
if (parent == null) return;
public void getConfig(ZookeeperServerConfig.Builder builder) { builder.myid(index()); }
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, boolean isHostedVespa) { this(parent, name, false, index, isHostedVespa); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, boolean isHostedVespa) { super(parent, name, retired, index, isHostedVespa); this.isHostedVespa = isHostedVespa; addComponent(getFS4ResourcePool()); } private static Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override public String getJvmOptions() { String jvmArgs = super.getJvmOptions(); return isHostedVespa && hasDocproc() ? ("".equals(jvmArgs) ? 
defaultHostedJVMArgs : defaultHostedJVMArgs + " " + jvmArgs) : jvmArgs; } private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override }
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, boolean isHostedVespa) { this(parent, name, false, index, isHostedVespa); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, boolean isHostedVespa) { super(parent, name, retired, index, isHostedVespa); this.isHostedVespa = isHostedVespa; addComponent(getFS4ResourcePool()); } private static Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override public String getJvmOptions() { String jvmArgs = super.getJvmOptions(); return isHostedVespa && hasDocproc() ? ("".equals(jvmArgs) ? 
defaultHostedJVMArgs : defaultHostedJVMArgs + " " + jvmArgs) : jvmArgs; } private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override }
Is this ever updated? Looks like it's only set when starting the server, so `shouldReconfigure` is always comparing the new config to the first config it received, not the current.
ZookeeperServerConfig existingConfig() { return zooKeeperRunner.zookeeperServerConfig(); }
return zooKeeperRunner.zookeeperServerConfig();
ZookeeperServerConfig existingConfig() { return zookeeperServerConfig; }
class Reconfigurer extends AbstractComponent { private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName()); private static final int sessionTimeoutInSeconds = 30; private ZooKeeperRunner zooKeeperRunner; @Inject public Reconfigurer() { log.log(Level.FINE, "Created ZooKeeperReconfigurer"); } void startOrReconfigure(ZookeeperServerConfig newConfig) { if (zooKeeperRunner == null) zooKeeperRunner = startServer(newConfig); if (shouldReconfigure(newConfig)) reconfigure(new ReconfigurationInfo(existingConfig(), newConfig)); } boolean shouldReconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = existingConfig(); if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false; return !newConfig.equals(existingConfig); } private ZooKeeperRunner startServer(ZookeeperServerConfig zookeeperServerConfig) { return new ZooKeeperRunner(zookeeperServerConfig); } void reconfigure(ReconfigurationInfo reconfigurationInfo) { List<String> joiningServers = reconfigurationInfo.joiningServers(); List<String> leavingServers = reconfigurationInfo.leavingServers(); List<String> addedServers = reconfigurationInfo.addedServers(); log.log(Level.INFO, "Will reconfigure zookeeper cluster. 
Joining servers: " + joiningServers + ", leaving servers: " + leavingServers + ", new members" + addedServers); try { ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(reconfigurationInfo.existingConfig()), sessionTimeoutInSeconds, null); long fromConfig = -1; zooKeeperAdmin.reconfigure(joiningServers, leavingServers, addedServers, fromConfig, null); } catch (IOException | KeeperException | InterruptedException e) { throw new RuntimeException(e); } } /** * Returns items in set a that are not in set b */ static List<String> setDifference(List<String> a, List<String> b) { Set<String> ret = new HashSet<>(a); ret.removeAll(b); return new ArrayList<>(ret); } private String connectionSpec(ZookeeperServerConfig config) { return String.join(",", servers(config)); } private static List<String> servers(ZookeeperServerConfig config) { return config.server().stream() .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort()) .collect(Collectors.toList()); } static class ReconfigurationInfo { private final ZookeeperServerConfig existingConfig; private final List<String> joiningServers; private final List<String> leavingServers; private final List<String> addedServers; public ReconfigurationInfo(ZookeeperServerConfig existingConfig, ZookeeperServerConfig newConfig) { this.existingConfig = existingConfig; List<String> originalServers = List.copyOf(servers(existingConfig)); this.joiningServers = servers(newConfig); this.leavingServers = setDifference(originalServers, servers(newConfig)); this.addedServers = setDifference(servers(newConfig), originalServers); } public ZookeeperServerConfig existingConfig() { return existingConfig; } public List<String> joiningServers() { return joiningServers; } public List<String> leavingServers() { return leavingServers; } public List<String> addedServers() { return addedServers; } } }
class Reconfigurer extends AbstractComponent { private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName()); private static final int sessionTimeoutInSeconds = 30; private ZooKeeperRunner zooKeeperRunner; private ZookeeperServerConfig zookeeperServerConfig; @Inject public Reconfigurer() { log.log(Level.FINE, "Created ZooKeeperReconfigurer"); } void startOrReconfigure(ZookeeperServerConfig newConfig) { if (zooKeeperRunner == null) zooKeeperRunner = startServer(newConfig); if (shouldReconfigure(newConfig)) reconfigure(new ReconfigurationInfo(existingConfig(), newConfig)); this.zookeeperServerConfig = newConfig; } boolean shouldReconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = existingConfig(); if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false; return !newConfig.equals(existingConfig); } private ZooKeeperRunner startServer(ZookeeperServerConfig zookeeperServerConfig) { return new ZooKeeperRunner(zookeeperServerConfig); } void reconfigure(ReconfigurationInfo reconfigurationInfo) { List<String> joiningServers = reconfigurationInfo.joiningServers(); List<String> leavingServers = reconfigurationInfo.leavingServers(); List<String> addedServers = reconfigurationInfo.addedServers(); log.log(Level.INFO, "Will reconfigure zookeeper cluster. 
Joining servers: " + joiningServers + ", leaving servers: " + leavingServers + ", new members" + addedServers); try { ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(reconfigurationInfo.existingConfig()), sessionTimeoutInSeconds, null); long fromConfig = -1; zooKeeperAdmin.reconfigure(joiningServers, leavingServers, addedServers, fromConfig, null); } catch (IOException | KeeperException | InterruptedException e) { throw new RuntimeException(e); } } /** * Returns items in set a that are not in set b */ static List<String> setDifference(List<String> a, List<String> b) { Set<String> ret = new HashSet<>(a); ret.removeAll(b); return new ArrayList<>(ret); } private String connectionSpec(ZookeeperServerConfig config) { return String.join(",", servers(config)); } private static List<String> servers(ZookeeperServerConfig config) { return config.server().stream() .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort()) .collect(Collectors.toList()); } static class ReconfigurationInfo { private final ZookeeperServerConfig existingConfig; private final List<String> joiningServers; private final List<String> leavingServers; private final List<String> addedServers; public ReconfigurationInfo(ZookeeperServerConfig existingConfig, ZookeeperServerConfig newConfig) { this.existingConfig = existingConfig; List<String> originalServers = List.copyOf(servers(existingConfig)); this.joiningServers = servers(newConfig); this.leavingServers = setDifference(originalServers, servers(newConfig)); this.addedServers = setDifference(servers(newConfig), originalServers); } public ZookeeperServerConfig existingConfig() { return existingConfig; } public List<String> joiningServers() { return joiningServers; } public List<String> leavingServers() { return leavingServers; } public List<String> addedServers() { return addedServers; } } }
You are right, that's wrong.
ZookeeperServerConfig existingConfig() { return zooKeeperRunner.zookeeperServerConfig(); }
return zooKeeperRunner.zookeeperServerConfig();
ZookeeperServerConfig existingConfig() { return zookeeperServerConfig; }
class Reconfigurer extends AbstractComponent { private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName()); private static final int sessionTimeoutInSeconds = 30; private ZooKeeperRunner zooKeeperRunner; @Inject public Reconfigurer() { log.log(Level.FINE, "Created ZooKeeperReconfigurer"); } void startOrReconfigure(ZookeeperServerConfig newConfig) { if (zooKeeperRunner == null) zooKeeperRunner = startServer(newConfig); if (shouldReconfigure(newConfig)) reconfigure(new ReconfigurationInfo(existingConfig(), newConfig)); } boolean shouldReconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = existingConfig(); if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false; return !newConfig.equals(existingConfig); } private ZooKeeperRunner startServer(ZookeeperServerConfig zookeeperServerConfig) { return new ZooKeeperRunner(zookeeperServerConfig); } void reconfigure(ReconfigurationInfo reconfigurationInfo) { List<String> joiningServers = reconfigurationInfo.joiningServers(); List<String> leavingServers = reconfigurationInfo.leavingServers(); List<String> addedServers = reconfigurationInfo.addedServers(); log.log(Level.INFO, "Will reconfigure zookeeper cluster. 
Joining servers: " + joiningServers + ", leaving servers: " + leavingServers + ", new members" + addedServers); try { ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(reconfigurationInfo.existingConfig()), sessionTimeoutInSeconds, null); long fromConfig = -1; zooKeeperAdmin.reconfigure(joiningServers, leavingServers, addedServers, fromConfig, null); } catch (IOException | KeeperException | InterruptedException e) { throw new RuntimeException(e); } } /** * Returns items in set a that are not in set b */ static List<String> setDifference(List<String> a, List<String> b) { Set<String> ret = new HashSet<>(a); ret.removeAll(b); return new ArrayList<>(ret); } private String connectionSpec(ZookeeperServerConfig config) { return String.join(",", servers(config)); } private static List<String> servers(ZookeeperServerConfig config) { return config.server().stream() .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort()) .collect(Collectors.toList()); } static class ReconfigurationInfo { private final ZookeeperServerConfig existingConfig; private final List<String> joiningServers; private final List<String> leavingServers; private final List<String> addedServers; public ReconfigurationInfo(ZookeeperServerConfig existingConfig, ZookeeperServerConfig newConfig) { this.existingConfig = existingConfig; List<String> originalServers = List.copyOf(servers(existingConfig)); this.joiningServers = servers(newConfig); this.leavingServers = setDifference(originalServers, servers(newConfig)); this.addedServers = setDifference(servers(newConfig), originalServers); } public ZookeeperServerConfig existingConfig() { return existingConfig; } public List<String> joiningServers() { return joiningServers; } public List<String> leavingServers() { return leavingServers; } public List<String> addedServers() { return addedServers; } } }
class Reconfigurer extends AbstractComponent { private static final Logger log = java.util.logging.Logger.getLogger(Reconfigurer.class.getName()); private static final int sessionTimeoutInSeconds = 30; private ZooKeeperRunner zooKeeperRunner; private ZookeeperServerConfig zookeeperServerConfig; @Inject public Reconfigurer() { log.log(Level.FINE, "Created ZooKeeperReconfigurer"); } void startOrReconfigure(ZookeeperServerConfig newConfig) { if (zooKeeperRunner == null) zooKeeperRunner = startServer(newConfig); if (shouldReconfigure(newConfig)) reconfigure(new ReconfigurationInfo(existingConfig(), newConfig)); this.zookeeperServerConfig = newConfig; } boolean shouldReconfigure(ZookeeperServerConfig newConfig) { ZookeeperServerConfig existingConfig = existingConfig(); if (!newConfig.dynamicReconfiguration() || existingConfig == null) return false; return !newConfig.equals(existingConfig); } private ZooKeeperRunner startServer(ZookeeperServerConfig zookeeperServerConfig) { return new ZooKeeperRunner(zookeeperServerConfig); } void reconfigure(ReconfigurationInfo reconfigurationInfo) { List<String> joiningServers = reconfigurationInfo.joiningServers(); List<String> leavingServers = reconfigurationInfo.leavingServers(); List<String> addedServers = reconfigurationInfo.addedServers(); log.log(Level.INFO, "Will reconfigure zookeeper cluster. 
Joining servers: " + joiningServers + ", leaving servers: " + leavingServers + ", new members" + addedServers); try { ZooKeeperAdmin zooKeeperAdmin = new ZooKeeperAdmin(connectionSpec(reconfigurationInfo.existingConfig()), sessionTimeoutInSeconds, null); long fromConfig = -1; zooKeeperAdmin.reconfigure(joiningServers, leavingServers, addedServers, fromConfig, null); } catch (IOException | KeeperException | InterruptedException e) { throw new RuntimeException(e); } } /** * Returns items in set a that are not in set b */ static List<String> setDifference(List<String> a, List<String> b) { Set<String> ret = new HashSet<>(a); ret.removeAll(b); return new ArrayList<>(ret); } private String connectionSpec(ZookeeperServerConfig config) { return String.join(",", servers(config)); } private static List<String> servers(ZookeeperServerConfig config) { return config.server().stream() .map(server -> server.hostname() + ":" + server.quorumPort() + ":" + server.electionPort()) .collect(Collectors.toList()); } static class ReconfigurationInfo { private final ZookeeperServerConfig existingConfig; private final List<String> joiningServers; private final List<String> leavingServers; private final List<String> addedServers; public ReconfigurationInfo(ZookeeperServerConfig existingConfig, ZookeeperServerConfig newConfig) { this.existingConfig = existingConfig; List<String> originalServers = List.copyOf(servers(existingConfig)); this.joiningServers = servers(newConfig); this.leavingServers = setDifference(originalServers, servers(newConfig)); this.addedServers = setDifference(servers(newConfig), originalServers); } public ZookeeperServerConfig existingConfig() { return existingConfig; } public List<String> joiningServers() { return joiningServers; } public List<String> leavingServers() { return leavingServers; } public List<String> addedServers() { return addedServers; } } }
Perhaps we could return status from first successful, if the first to return a response is non-200?
private static ClusterReindexing toClusterReindexing(SimpleHttpResponse response) throws IOException { if (response.getCode() != HttpStatus.SC_OK) throw new IOException("Expected status code 200, got " + response.getCode()); if (response.getBody() == null) throw new IOException("Response has no content"); return toClusterReindexing(response.getBodyBytes()); }
if (response.getCode() != HttpStatus.SC_OK) throw new IOException("Expected status code 200, got " + response.getCode());
private static ClusterReindexing toClusterReindexing(SimpleHttpResponse response) throws IOException { if (response.getCode() != HttpStatus.SC_OK) throw new IOException("Expected status code 200, got " + response.getCode()); if (response.getBody() == null) throw new IOException("Response has no content"); return toClusterReindexing(response.getBodyBytes()); }
class DefaultClusterReindexingStatusClient implements ClusterReindexingStatusClient { private static final ObjectMapper mapper = new ObjectMapper(); private final Executor executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("cluster-controller-reindexing-client-")); private final CloseableHttpAsyncClient httpClient = createHttpClient(); public DefaultClusterReindexingStatusClient() { httpClient.start(); } @Override public Map<String, ClusterReindexing> getReindexingStatus(ModelResult application) throws IOException { Map<ClusterId, List<ServiceInfo>> clusters = clusterControllerClusters(application); Map<ClusterId, CompletableFuture<ClusterReindexing>> futureStatusPerCluster = new HashMap<>(); clusters.forEach((clusterId, clusterNodes) -> { var parallelRequests = clusterNodes.stream() .map(this::getReindexingStatus) .collect(Collectors.toList()); CompletableFuture<ClusterReindexing> combinedRequest = CompletableFutures.firstOf(parallelRequests); futureStatusPerCluster.put(clusterId, combinedRequest); }); try { Map<String, ClusterReindexing> statusPerCluster = new HashMap<>(); futureStatusPerCluster.forEach((clusterId, futureStatus) -> { statusPerCluster.put(clusterId.s(), futureStatus.join()); }); return statusPerCluster; } catch (Exception e) { throw new IOException("Failed to get reindexing status from cluster controllers: " + e.getMessage(), e); } } @Override public void close() { uncheck(() -> httpClient.close()); } private CompletableFuture<ClusterReindexing> getReindexingStatus(ServiceInfo service) { URI uri = URI.create(String.format("http: CompletableFuture<SimpleHttpResponse> responsePromise = new CompletableFuture<>(); httpClient.execute(SimpleHttpRequests.get(uri), new FutureCallback<>() { @Override public void completed(SimpleHttpResponse result) { responsePromise.complete(result); } @Override public void failed(Exception ex) { responsePromise.completeExceptionally(ex); } @Override public void cancelled() { 
responsePromise.cancel(false); } }); return responsePromise.handleAsync((response, error) -> { if (response != null) { return uncheck(() -> toClusterReindexing(response)); } else { throw throwUnchecked(new IOException(String.format("For '%s': %s", uri, error.getMessage()), error)); } }, executor); } private static ClusterReindexing toClusterReindexing(byte[] requestBody) throws IOException { JsonNode jsonNode = mapper.readTree(requestBody); Map<String, ClusterReindexing.Status> documentStatuses = new HashMap<>(); for (JsonNode statusJson : jsonNode.get("status")) { String type = statusJson.get("type").textValue(); Instant startedMillis = Instant.ofEpochMilli(statusJson.get("startedMillis").longValue()); Instant endedMillis = Instant.ofEpochMilli(statusJson.get("endedMillis").longValue()); String progressToken = statusJson.get("progress").textValue(); ClusterReindexing.State state = ClusterReindexing.State.fromString(statusJson.get("state").textValue()); String message = statusJson.get("message").textValue(); documentStatuses.put(type, new ClusterReindexing.Status(startedMillis, endedMillis, state, message, progressToken)); } return new ClusterReindexing(documentStatuses); } private static int getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findAny() .orElseThrow(() -> new IllegalStateException("Cluster controller container has no container port")); } private static Map<ClusterId, List<ServiceInfo>> clusterControllerClusters(ModelResult application) { return application.getModel().getHosts().stream() .flatMap(host -> host.getServices().stream()) .filter(service -> service.getServiceType().equals(CLUSTERCONTROLLER_CONTAINER.serviceName)) .collect(Collectors.groupingBy(service -> new ClusterId(service.getProperty("clustername").get()))); } private static CloseableHttpAsyncClient createHttpClient() { return VespaAsyncHttpClientBuilder .create() 
.setIOReactorConfig(IOReactorConfig.custom() .setSoTimeout(Timeout.ofSeconds(2)) .build()) .setDefaultRequestConfig( RequestConfig.custom() .setConnectTimeout(Timeout.ofSeconds(2)) .setConnectionRequestTimeout(Timeout.ofSeconds(2)) .setResponseTimeout(Timeout.ofSeconds(4)) .build()) .setUserAgent("cluster-controller-reindexing-client") .build(); } }
class DefaultClusterReindexingStatusClient implements ClusterReindexingStatusClient { private static final ObjectMapper mapper = new ObjectMapper(); private final Executor executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("cluster-controller-reindexing-client-")); private final CloseableHttpAsyncClient httpClient = createHttpClient(); public DefaultClusterReindexingStatusClient() { httpClient.start(); } @Override public Map<String, ClusterReindexing> getReindexingStatus(ModelResult application) throws IOException { Map<ClusterId, List<ServiceInfo>> clusters = clusterControllerClusters(application); Map<ClusterId, CompletableFuture<ClusterReindexing>> futureStatusPerCluster = new HashMap<>(); clusters.forEach((clusterId, clusterNodes) -> { var parallelRequests = clusterNodes.stream() .map(this::getReindexingStatus) .collect(Collectors.toList()); CompletableFuture<ClusterReindexing> combinedRequest = CompletableFutures.firstOf(parallelRequests); futureStatusPerCluster.put(clusterId, combinedRequest); }); try { Map<String, ClusterReindexing> statusPerCluster = new HashMap<>(); futureStatusPerCluster.forEach((clusterId, futureStatus) -> { statusPerCluster.put(clusterId.s(), futureStatus.join()); }); return statusPerCluster; } catch (Exception e) { throw new IOException("Failed to get reindexing status from cluster controllers: " + e.getMessage(), e); } } @Override public void close() { uncheck(() -> httpClient.close()); } private CompletableFuture<ClusterReindexing> getReindexingStatus(ServiceInfo service) { URI uri = URI.create(String.format("http: CompletableFuture<SimpleHttpResponse> responsePromise = new CompletableFuture<>(); httpClient.execute(SimpleHttpRequests.get(uri), new FutureCallback<>() { @Override public void completed(SimpleHttpResponse result) { responsePromise.complete(result); } @Override public void failed(Exception ex) { responsePromise.completeExceptionally(ex); } @Override public void cancelled() { 
responsePromise.cancel(false); } }); return responsePromise.handleAsync((response, error) -> { if (response != null) { return uncheck(() -> toClusterReindexing(response)); } else { throw throwUnchecked(new IOException(String.format("For '%s': %s", uri, error.getMessage()), error)); } }, executor); } private static ClusterReindexing toClusterReindexing(byte[] requestBody) throws IOException { JsonNode jsonNode = mapper.readTree(requestBody); Map<String, ClusterReindexing.Status> documentStatuses = new HashMap<>(); for (JsonNode statusJson : jsonNode.get("status")) { String type = statusJson.get("type").textValue(); Instant startedMillis = Instant.ofEpochMilli(statusJson.get("startedMillis").longValue()); Instant endedMillis = Instant.ofEpochMilli(statusJson.get("endedMillis").longValue()); String progressToken = statusJson.get("progress").textValue(); ClusterReindexing.State state = ClusterReindexing.State.fromString(statusJson.get("state").textValue()); String message = statusJson.get("message").textValue(); documentStatuses.put(type, new ClusterReindexing.Status(startedMillis, endedMillis, state, message, progressToken)); } return new ClusterReindexing(documentStatuses); } private static int getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findAny() .orElseThrow(() -> new IllegalStateException("Cluster controller container has no container port")); } private static Map<ClusterId, List<ServiceInfo>> clusterControllerClusters(ModelResult application) { return application.getModel().getHosts().stream() .flatMap(host -> host.getServices().stream()) .filter(service -> service.getServiceType().equals(CLUSTERCONTROLLER_CONTAINER.serviceName)) .collect(Collectors.groupingBy(service -> new ClusterId(service.getProperty("clustername").get()))); } private static CloseableHttpAsyncClient createHttpClient() { return VespaAsyncHttpClientBuilder .create() 
.setIOReactorConfig(IOReactorConfig.custom() .setSoTimeout(Timeout.ofSeconds(2)) .build()) .setDefaultRequestConfig( RequestConfig.custom() .setConnectTimeout(Timeout.ofSeconds(2)) .setConnectionRequestTimeout(Timeout.ofSeconds(2)) .setResponseTimeout(Timeout.ofSeconds(4)) .build()) .setUserAgent("cluster-controller-reindexing-client") .build(); } }
This is always there, but can fix later.
private static ClusterReindexing toClusterReindexing(byte[] requestBody) throws IOException { JsonNode jsonNode = mapper.readTree(requestBody); Map<String, ClusterReindexing.Status> documentStatuses = new HashMap<>(); for (JsonNode statusJson : jsonNode.get("status")) { String type = statusJson.get("type").textValue(); Instant startedMillis = Instant.ofEpochMilli(statusJson.get("startedMillis").longValue()); Instant endedMillis = Optional.ofNullable(statusJson.get("endedMillis")) .map(json -> Instant.ofEpochMilli(json.longValue())) .orElse(null); String progressToken = Optional.ofNullable(statusJson.get("progress")) .map(JsonNode::textValue) .orElse(null); ClusterReindexing.State state = Optional.ofNullable(statusJson.get("state")) .map(json -> ClusterReindexing.State.fromString(json.textValue())) .orElse(null); String message = Optional.ofNullable(statusJson.get("message")) .map(JsonNode::textValue) .orElse(null); documentStatuses.put(type, new ClusterReindexing.Status(startedMillis, endedMillis, state, message, progressToken)); } return new ClusterReindexing(documentStatuses); }
ClusterReindexing.State state = Optional.ofNullable(statusJson.get("state"))
private static ClusterReindexing toClusterReindexing(byte[] requestBody) throws IOException { JsonNode jsonNode = mapper.readTree(requestBody); Map<String, ClusterReindexing.Status> documentStatuses = new HashMap<>(); for (JsonNode statusJson : jsonNode.get("status")) { String type = statusJson.get("type").textValue(); Instant startedMillis = Instant.ofEpochMilli(statusJson.get("startedMillis").longValue()); Instant endedMillis = Optional.ofNullable(statusJson.get("endedMillis")) .map(json -> Instant.ofEpochMilli(json.longValue())) .orElse(null); String progressToken = Optional.ofNullable(statusJson.get("progress")) .map(JsonNode::textValue) .orElse(null); ClusterReindexing.State state = Optional.ofNullable(statusJson.get("state")) .map(json -> ClusterReindexing.State.fromString(json.textValue())) .orElse(null); String message = Optional.ofNullable(statusJson.get("message")) .map(JsonNode::textValue) .orElse(null); documentStatuses.put(type, new ClusterReindexing.Status(startedMillis, endedMillis, state, message, progressToken)); } return new ClusterReindexing(documentStatuses); }
class DefaultClusterReindexingStatusClient implements ClusterReindexingStatusClient { private static final ObjectMapper mapper = new ObjectMapper(); private final Executor executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("cluster-controller-reindexing-client-")); private final CloseableHttpAsyncClient httpClient = createHttpClient(); public DefaultClusterReindexingStatusClient() { httpClient.start(); } @Override public Map<String, ClusterReindexing> getReindexingStatus(ModelResult application) throws IOException { Map<ClusterId, List<ServiceInfo>> clusters = clusterControllerClusters(application); Map<ClusterId, CompletableFuture<ClusterReindexing>> futureStatusPerCluster = new HashMap<>(); clusters.forEach((clusterId, clusterNodes) -> { var parallelRequests = clusterNodes.stream() .map(this::getReindexingStatus) .collect(Collectors.toList()); CompletableFuture<ClusterReindexing> combinedRequest = CompletableFutures.firstOf(parallelRequests); futureStatusPerCluster.put(clusterId, combinedRequest); }); try { Map<String, ClusterReindexing> statusPerCluster = new HashMap<>(); futureStatusPerCluster.forEach((clusterId, futureStatus) -> { statusPerCluster.put(clusterId.s(), futureStatus.join()); }); return statusPerCluster; } catch (Exception e) { throw new IOException("Failed to get reindexing status from cluster controllers: " + e.getMessage(), e); } } @Override public void close() { uncheck(() -> httpClient.close()); } private CompletableFuture<ClusterReindexing> getReindexingStatus(ServiceInfo service) { URI uri = URI.create(String.format("http: CompletableFuture<SimpleHttpResponse> responsePromise = new CompletableFuture<>(); httpClient.execute(SimpleHttpRequests.get(uri), new FutureCallback<>() { @Override public void completed(SimpleHttpResponse result) { responsePromise.complete(result); } @Override public void failed(Exception ex) { responsePromise.completeExceptionally(ex); } @Override public void cancelled() { 
responsePromise.cancel(false); } }); return responsePromise.handleAsync((response, error) -> { if (response != null) { return uncheck(() -> toClusterReindexing(response)); } else { throw throwUnchecked(new IOException(String.format("For '%s': %s", uri, error.getMessage()), error)); } }, executor); } private static ClusterReindexing toClusterReindexing(SimpleHttpResponse response) throws IOException { if (response.getCode() != HttpStatus.SC_OK) throw new IOException("Expected status code 200, got " + response.getCode()); if (response.getBody() == null) throw new IOException("Response has no content"); return toClusterReindexing(response.getBodyBytes()); } private static int getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findAny() .orElseThrow(() -> new IllegalStateException("Cluster controller container has no container port")); } private static Map<ClusterId, List<ServiceInfo>> clusterControllerClusters(ModelResult application) { return application.getModel().getHosts().stream() .flatMap(host -> host.getServices().stream()) .filter(service -> service.getServiceType().equals(CLUSTERCONTROLLER_CONTAINER.serviceName)) .collect(Collectors.groupingBy(service -> new ClusterId(service.getProperty("clustername").get()))); } private static CloseableHttpAsyncClient createHttpClient() { return VespaAsyncHttpClientBuilder .create() .setIOReactorConfig(IOReactorConfig.custom() .setSoTimeout(Timeout.ofSeconds(2)) .build()) .setDefaultRequestConfig( RequestConfig.custom() .setConnectTimeout(Timeout.ofSeconds(2)) .setConnectionRequestTimeout(Timeout.ofSeconds(2)) .setResponseTimeout(Timeout.ofSeconds(4)) .build()) .setUserAgent("cluster-controller-reindexing-client") .build(); } }
class DefaultClusterReindexingStatusClient implements ClusterReindexingStatusClient { private static final ObjectMapper mapper = new ObjectMapper(); private final Executor executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("cluster-controller-reindexing-client-")); private final CloseableHttpAsyncClient httpClient = createHttpClient(); public DefaultClusterReindexingStatusClient() { httpClient.start(); } @Override public Map<String, ClusterReindexing> getReindexingStatus(ModelResult application) throws IOException { Map<ClusterId, List<ServiceInfo>> clusters = clusterControllerClusters(application); Map<ClusterId, CompletableFuture<ClusterReindexing>> futureStatusPerCluster = new HashMap<>(); clusters.forEach((clusterId, clusterNodes) -> { var parallelRequests = clusterNodes.stream() .map(this::getReindexingStatus) .collect(Collectors.toList()); CompletableFuture<ClusterReindexing> combinedRequest = CompletableFutures.firstOf(parallelRequests); futureStatusPerCluster.put(clusterId, combinedRequest); }); try { Map<String, ClusterReindexing> statusPerCluster = new HashMap<>(); futureStatusPerCluster.forEach((clusterId, futureStatus) -> { statusPerCluster.put(clusterId.s(), futureStatus.join()); }); return statusPerCluster; } catch (Exception e) { throw new IOException("Failed to get reindexing status from cluster controllers: " + e.getMessage(), e); } } @Override public void close() { uncheck(() -> httpClient.close()); } private CompletableFuture<ClusterReindexing> getReindexingStatus(ServiceInfo service) { URI uri = URI.create(String.format("http: CompletableFuture<SimpleHttpResponse> responsePromise = new CompletableFuture<>(); httpClient.execute(SimpleHttpRequests.get(uri), new FutureCallback<>() { @Override public void completed(SimpleHttpResponse result) { responsePromise.complete(result); } @Override public void failed(Exception ex) { responsePromise.completeExceptionally(ex); } @Override public void cancelled() { 
responsePromise.cancel(false); } }); return responsePromise.handleAsync((response, error) -> { if (response != null) { return uncheck(() -> toClusterReindexing(response)); } else { throw throwUnchecked(new IOException(String.format("For '%s': %s", uri, error.getMessage()), error)); } }, executor); } private static ClusterReindexing toClusterReindexing(SimpleHttpResponse response) throws IOException { if (response.getCode() != HttpStatus.SC_OK) throw new IOException("Expected status code 200, got " + response.getCode()); if (response.getBody() == null) throw new IOException("Response has no content"); return toClusterReindexing(response.getBodyBytes()); } private static int getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findAny() .orElseThrow(() -> new IllegalStateException("Cluster controller container has no container port")); } private static Map<ClusterId, List<ServiceInfo>> clusterControllerClusters(ModelResult application) { return application.getModel().getHosts().stream() .flatMap(host -> host.getServices().stream()) .filter(service -> service.getServiceType().equals(CLUSTERCONTROLLER_CONTAINER.serviceName)) .collect(Collectors.groupingBy(service -> new ClusterId(service.getProperty("clustername").get()))); } private static CloseableHttpAsyncClient createHttpClient() { return VespaAsyncHttpClientBuilder .create() .setIOReactorConfig(IOReactorConfig.custom() .setSoTimeout(Timeout.ofSeconds(2)) .build()) .setDefaultRequestConfig( RequestConfig.custom() .setConnectTimeout(Timeout.ofSeconds(2)) .setConnectionRequestTimeout(Timeout.ofSeconds(2)) .setResponseTimeout(Timeout.ofSeconds(4)) .build()) .setUserAgent("cluster-controller-reindexing-client") .build(); } }
Thanks, done
public static NodeList of(Node ... nodes) { List<Node> nodeList = new ArrayList<>(); Collections.addAll(nodeList, nodes); return copyOf(nodeList); }
return copyOf(nodeList);
public static NodeList of(Node ... nodes) { return copyOf(List.of(nodes)); }
class NodeList extends AbstractFilteringList<Node, NodeList> { protected NodeList(List<Node> nodes, boolean negate) { super(nodes, negate, NodeList::new); } /** Returns the node with the given hostname from this list, or empty if it is not present */ public Optional<Node> node(String hostname) { return matching(node -> node.hostname().equals(hostname)).first(); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().retired()); } /** Returns the subset of nodes that are being deprovisioned */ public NodeList deprovisioning() { return matching(node -> node.status().wantToRetire() && node.status().wantToDeprovision()); } /** Returns the subset of nodes which are removable */ public NodeList removable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().isRemovable()); } /** Returns the subset of nodes having exactly the given resources */ public NodeList resources(NodeResources resources) { return matching(node -> node.resources().equals(resources)); } /** Returns the subset of nodes which satisfy the given resources */ public NodeList satisfies(NodeResources resources) { return matching(node -> node.resources().satisfies(resources)); } /** Returns the subset of nodes not in the given set */ public NodeList except(Set<Node> nodes) { return matching(node -> ! 
nodes.contains(node)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes that run containers */ public NodeList container() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().isContainer()); } /** Returns the subset of nodes that run a stateless service */ public NodeList stateless() { return matching(node -> node.allocation().isPresent() && ! node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that run a stateful service */ public NodeList stateful() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that are currently changing their Vespa version */ public NodeList changingVersion() { return matching(node -> node.status().vespaVersion().isPresent() && node.allocation().isPresent() && !node.status().vespaVersion().get().equals(node.allocation().get().membership().cluster().vespaVersion())); } /** Returns the subset of nodes with want to fail set to true */ public NodeList failing() { return matching(node -> node.status().wantToFail()); } /** Returns the subset of nodes that are currently changing their OS version to given version */ public NodeList changingOsVersionTo(Version version) { return matching(node -> node.status().osVersion().changingTo(version)); } /** Returns the subset of nodes that are currently changing their OS version */ public NodeList changingOsVersion() { return matching(node -> node.status().osVersion().changing()); } /** Returns a copy of this sorted by current OS version (lowest to highest) */ public NodeList byIncreasingOsVersion() { return sortedBy(Comparator.comparing(node -> node.status() .osVersion() 
.current() .orElse(Version.emptyVersion))); } /** Returns the subset of nodes that are currently on a lower version than the given version */ public NodeList osVersionIsBefore(Version version) { return matching(node -> node.status().osVersion().isBefore(version)); } /** Returns the subset of nodes that are currently on the given OS version */ public NodeList onOsVersion(Version version) { return matching(node -> node.status().osVersion().matches(version)); } /** Returns the subset of nodes assigned to the given cluster */ public NodeList cluster(ClusterSpec.Id cluster) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().id().equals(cluster)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return matching(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes allocated to a tester instance */ public NodeList tester() { return matching(node -> node.allocation().isPresent() && node.allocation().get().owner().instance().isTester()); } /** Returns the subset of nodes matching the given node type(s) */ public NodeList nodeType(NodeType first, NodeType... 
rest) { if (rest.length == 0) { return matching(node -> node.type() == first); } EnumSet<NodeType> nodeTypes = EnumSet.of(first, rest); return matching(node -> nodeTypes.contains(node.type())); } /** Returns the subset of nodes of the host type */ public NodeList hosts() { return nodeType(NodeType.host); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return matching(node -> node.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return matching(node -> node.hasParent(hostname)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Node.State first, Node.State... rest) { if (rest.length == 0) { return matching(node -> node.state() == first); } return state(EnumSet.of(first, rest)); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Set<Node.State> nodeStates) { return matching(node -> nodeStates.contains(node.state())); } /** Returns the subset of nodes which have a record of being down */ public NodeList down() { return matching(Node::isDown); } /** Returns the subset of nodes which have retirement requested */ public NodeList retirementRequested() { return matching(node -> node.status().wantToRetire() || node.status().preferToRetire()); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(NodeList children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf)); } /** Returns the nodes contained in the group identified by given index */ public NodeList group(int index) { return matching(n -> n.allocation().isPresent() && n.allocation().get().membership().cluster().group().equals(Optional.of(ClusterSpec.Group.from(index)))); } /** 
Returns the parent node of the given child node */ public Optional<Node> parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> stream().filter(node -> node.hostname().equals(parentHostname)) .findFirst()); } /** * Returns the cluster spec of the nodes in this, without any group designation * * @throws IllegalStateException if there are no nodes in thus list or they do not all belong * to the same cluster */ public ClusterSpec clusterSpec() { ensureSingleCluster(); if (isEmpty()) throw new IllegalStateException("No nodes"); return first().get().allocation().get().membership().cluster().with(Optional.empty()); } /** * Returns the resources of the nodes of this. * * NOTE: If the nodes do not all have the same values of node resources, a random pick among those node resources * will be returned. * * @throws IllegalStateException if the nodes in this do not all belong to the same cluster */ public ClusterResources toResources() { ensureSingleCluster(); if (isEmpty()) return new ClusterResources(0, 0, NodeResources.unspecified()); return new ClusterResources(size(), (int)stream().map(node -> node.allocation().get().membership().cluster().group().get()) .distinct() .count(), first().get().resources()); } /** Returns the nodes that are allocated on an exclusive network switch within its cluster */ public NodeList onExclusiveSwitch(NodeList clusterHosts) { ensureSingleCluster(); Map<String, Long> switchCount = clusterHosts.stream() .flatMap(host -> host.switchHostname().stream()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); return matching(node -> { Optional<Node> nodeOnSwitch = clusterHosts.parentOf(node); if (node.parentHostname().isPresent()) { if (nodeOnSwitch.isEmpty()) { throw new IllegalArgumentException("Parent of " + node + ", " + node.parentHostname().get() + ", not found in given cluster hosts"); } } else { nodeOnSwitch = Optional.of(node); } Optional<String> allocatedSwitch = 
nodeOnSwitch.flatMap(Node::switchHostname); return allocatedSwitch.isEmpty() || switchCount.get(allocatedSwitch.get()) == 1; }); } private void ensureSingleCluster() { if (isEmpty()) return; if (stream().anyMatch(node -> node.allocation().isEmpty())) throw new IllegalStateException("Some nodes are not allocated to a cluster"); ClusterSpec firstNodeSpec = first().get().allocation().get().membership().cluster().with(Optional.empty()); if (stream().map(node -> node.allocation().get().membership().cluster().with(Optional.empty())) .anyMatch(clusterSpec -> ! clusterSpec.equals(firstNodeSpec))) throw new IllegalStateException("Nodes belong to multiple clusters"); } /** Returns the nodes of this as a stream */ public Stream<Node> stream() { return asList().stream(); } public static NodeList copyOf(List<Node> nodes) { return new NodeList(nodes, false); } @Override public String toString() { return asList().toString(); } @Override public int hashCode() { return asList().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof NodeList)) return false; return this.asList().equals(((NodeList) other).asList()); } }
class NodeList extends AbstractFilteringList<Node, NodeList> { protected NodeList(List<Node> nodes, boolean negate) { super(nodes, negate, NodeList::new); } /** Returns the node with the given hostname from this list, or empty if it is not present */ public Optional<Node> node(String hostname) { return matching(node -> node.hostname().equals(hostname)).first(); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().retired()); } /** Returns the subset of nodes that are being deprovisioned */ public NodeList deprovisioning() { return matching(node -> node.status().wantToRetire() && node.status().wantToDeprovision()); } /** Returns the subset of nodes which are removable */ public NodeList removable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().isRemovable()); } /** Returns the subset of nodes having exactly the given resources */ public NodeList resources(NodeResources resources) { return matching(node -> node.resources().equals(resources)); } /** Returns the subset of nodes which satisfy the given resources */ public NodeList satisfies(NodeResources resources) { return matching(node -> node.resources().satisfies(resources)); } /** Returns the subset of nodes not in the given set */ public NodeList except(Set<Node> nodes) { return matching(node -> ! 
nodes.contains(node)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes that run containers */ public NodeList container() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().isContainer()); } /** Returns the subset of nodes that run a stateless service */ public NodeList stateless() { return matching(node -> node.allocation().isPresent() && ! node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that run a stateful service */ public NodeList stateful() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that are currently changing their Vespa version */ public NodeList changingVersion() { return matching(node -> node.status().vespaVersion().isPresent() && node.allocation().isPresent() && !node.status().vespaVersion().get().equals(node.allocation().get().membership().cluster().vespaVersion())); } /** Returns the subset of nodes with want to fail set to true */ public NodeList failing() { return matching(node -> node.status().wantToFail()); } /** Returns the subset of nodes that are currently changing their OS version to given version */ public NodeList changingOsVersionTo(Version version) { return matching(node -> node.status().osVersion().changingTo(version)); } /** Returns the subset of nodes that are currently changing their OS version */ public NodeList changingOsVersion() { return matching(node -> node.status().osVersion().changing()); } /** Returns a copy of this sorted by current OS version (lowest to highest) */ public NodeList byIncreasingOsVersion() { return sortedBy(Comparator.comparing(node -> node.status() .osVersion() 
.current() .orElse(Version.emptyVersion))); } /** Returns the subset of nodes that are currently on a lower version than the given version */ public NodeList osVersionIsBefore(Version version) { return matching(node -> node.status().osVersion().isBefore(version)); } /** Returns the subset of nodes that are currently on the given OS version */ public NodeList onOsVersion(Version version) { return matching(node -> node.status().osVersion().matches(version)); } /** Returns the subset of nodes assigned to the given cluster */ public NodeList cluster(ClusterSpec.Id cluster) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().id().equals(cluster)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return matching(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes allocated to a tester instance */ public NodeList tester() { return matching(node -> node.allocation().isPresent() && node.allocation().get().owner().instance().isTester()); } /** Returns the subset of nodes matching the given node type(s) */ public NodeList nodeType(NodeType first, NodeType... 
rest) { if (rest.length == 0) { return matching(node -> node.type() == first); } EnumSet<NodeType> nodeTypes = EnumSet.of(first, rest); return matching(node -> nodeTypes.contains(node.type())); } /** Returns the subset of nodes of the host type */ public NodeList hosts() { return nodeType(NodeType.host); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return matching(node -> node.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return matching(node -> node.hasParent(hostname)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Node.State first, Node.State... rest) { if (rest.length == 0) { return matching(node -> node.state() == first); } return state(EnumSet.of(first, rest)); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Set<Node.State> nodeStates) { return matching(node -> nodeStates.contains(node.state())); } /** Returns the subset of nodes which have a record of being down */ public NodeList down() { return matching(Node::isDown); } /** Returns the subset of nodes which have retirement requested */ public NodeList retirementRequested() { return matching(node -> node.status().wantToRetire() || node.status().preferToRetire()); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(NodeList children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf)); } /** Returns the nodes contained in the group identified by given index */ public NodeList group(int index) { return matching(n -> n.allocation().isPresent() && n.allocation().get().membership().cluster().group().equals(Optional.of(ClusterSpec.Group.from(index)))); } /** 
Returns the parent node of the given child node */ public Optional<Node> parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> stream().filter(node -> node.hostname().equals(parentHostname)) .findFirst()); } /** * Returns the cluster spec of the nodes in this, without any group designation * * @throws IllegalStateException if there are no nodes in thus list or they do not all belong * to the same cluster */ public ClusterSpec clusterSpec() { ensureSingleCluster(); if (isEmpty()) throw new IllegalStateException("No nodes"); return first().get().allocation().get().membership().cluster().with(Optional.empty()); } /** * Returns the resources of the nodes of this. * * NOTE: If the nodes do not all have the same values of node resources, a random pick among those node resources * will be returned. * * @throws IllegalStateException if the nodes in this do not all belong to the same cluster */ public ClusterResources toResources() { ensureSingleCluster(); if (isEmpty()) return new ClusterResources(0, 0, NodeResources.unspecified()); return new ClusterResources(size(), (int)stream().map(node -> node.allocation().get().membership().cluster().group().get()) .distinct() .count(), first().get().resources()); } /** Returns the nodes that are allocated on an exclusive network switch within its cluster */ public NodeList onExclusiveSwitch(NodeList clusterHosts) { ensureSingleCluster(); Map<String, Long> switchCount = clusterHosts.stream() .flatMap(host -> host.switchHostname().stream()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); return matching(node -> { Optional<Node> nodeOnSwitch = clusterHosts.parentOf(node); if (node.parentHostname().isPresent()) { if (nodeOnSwitch.isEmpty()) { throw new IllegalArgumentException("Parent of " + node + ", " + node.parentHostname().get() + ", not found in given cluster hosts"); } } else { nodeOnSwitch = Optional.of(node); } Optional<String> allocatedSwitch = 
nodeOnSwitch.flatMap(Node::switchHostname); return allocatedSwitch.isEmpty() || switchCount.get(allocatedSwitch.get()) == 1; }); } private void ensureSingleCluster() { if (isEmpty()) return; if (stream().anyMatch(node -> node.allocation().isEmpty())) throw new IllegalStateException("Some nodes are not allocated to a cluster"); ClusterSpec firstNodeSpec = first().get().allocation().get().membership().cluster().with(Optional.empty()); if (stream().map(node -> node.allocation().get().membership().cluster().with(Optional.empty())) .anyMatch(clusterSpec -> ! clusterSpec.equals(firstNodeSpec))) throw new IllegalStateException("Nodes belong to multiple clusters"); } /** Returns the nodes of this as a stream */ public Stream<Node> stream() { return asList().stream(); } public static NodeList copyOf(List<Node> nodes) { return new NodeList(nodes, false); } @Override public String toString() { return asList().toString(); } @Override public int hashCode() { return asList().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof NodeList)) return false; return this.asList().equals(((NodeList) other).asList()); } }
If we are guaranteed that this is set, this should be `.orElseThrow()`, otherwise we should filter this in `shouldUpdate()`
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(node.hostname().value()); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().orElse(null)); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); } } return true; }
updatedNode.setSwitchHostname(nodeEntity.switchHostname().orElse(null));
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(node.hostname().value()); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().get()); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); } } return true; }
class HostSwitchUpdater extends ControllerMaintainer { private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
class HostSwitchUpdater extends ControllerMaintainer { private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; if (nodeEntity.switchHostname().isEmpty()) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
NodeEntity isn't guaranteed to have a switch, and if it's removed we want to remove it from the node as well.
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(node.hostname().value()); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().orElse(null)); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); } } return true; }
updatedNode.setSwitchHostname(nodeEntity.switchHostname().orElse(null));
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(node.hostname().value()); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().get()); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); } } return true; }
class HostSwitchUpdater extends ControllerMaintainer { private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
class HostSwitchUpdater extends ControllerMaintainer { private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; if (nodeEntity.switchHostname().isEmpty()) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
I see, makes sense, but `NodeRepositoryNode` excludes `null`s, so I think this will just be a patch with `{}`?
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(node.hostname().value()); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().orElse(null)); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); } } return true; }
updatedNode.setSwitchHostname(nodeEntity.switchHostname().orElse(null));
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(node.hostname().value()); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().get()); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); } } return true; }
class HostSwitchUpdater extends ControllerMaintainer { private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
class HostSwitchUpdater extends ControllerMaintainer { private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; if (nodeEntity.switchHostname().isEmpty()) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
Wait for shutdown?
void shutdown() { executorService.shutdownNow(); }
executorService.shutdownNow();
void shutdown() { executorService.shutdown(); try { executorService.awaitTermination(10000, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.log(Level.INFO, "Interrupted waiting for executor to complete", e); } if ( ! executorService.isTerminated()) { executorService.shutdownNow(); } }
class ZooKeeperRunner implements Runnable { private static final Logger log = java.util.logging.Logger.getLogger(ZooKeeperRunner.class.getName()); private final ExecutorService executorService; private final ZookeeperServerConfig zookeeperServerConfig; public ZooKeeperRunner(ZookeeperServerConfig zookeeperServerConfig) { this.zookeeperServerConfig = zookeeperServerConfig; new Configurator(zookeeperServerConfig).writeConfigToDisk(TransportSecurityUtils.getOptions()); executorService = Executors.newSingleThreadExecutor(new DaemonThreadFactory("zookeeper server")); executorService.submit(this); } @Override public void run() { String[] args = new String[]{getDefaults().underVespaHome(zookeeperServerConfig.zooKeeperConfigFile())}; log.log(Level.INFO, "Starting ZooKeeper server with config file " + args[0] + ". Trying to establish ZooKeeper quorum (members: " + zookeeperServerHostnames(zookeeperServerConfig) + ")"); new Server().initializeAndRun(args); } /** * Extends QuoroumPeerMain to be able to call initializeAndRun() */ private static class Server extends QuorumPeerMain { @Override protected void initializeAndRun(String[] args) { try { super.initializeAndRun(args); } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) { throw new RuntimeException("Exception when initializing or running ZooKeeper server", e); } } } }
class ZooKeeperRunner implements Runnable { private static final Logger log = java.util.logging.Logger.getLogger(ZooKeeperRunner.class.getName()); private final ExecutorService executorService; private final ZookeeperServerConfig zookeeperServerConfig; public ZooKeeperRunner(ZookeeperServerConfig zookeeperServerConfig) { this.zookeeperServerConfig = zookeeperServerConfig; new Configurator(zookeeperServerConfig).writeConfigToDisk(TransportSecurityUtils.getOptions()); executorService = Executors.newSingleThreadExecutor(new DaemonThreadFactory("zookeeper server")); executorService.submit(this); } @Override public void run() { String[] args = new String[]{getDefaults().underVespaHome(zookeeperServerConfig.zooKeeperConfigFile())}; log.log(Level.INFO, "Starting ZooKeeper server with config file " + args[0] + ". Trying to establish ZooKeeper quorum (members: " + zookeeperServerHostnames(zookeeperServerConfig) + ")"); new Server().initializeAndRun(args); } /** * Extends QuoroumPeerMain to be able to call initializeAndRun() */ private static class Server extends QuorumPeerMain { @Override protected void initializeAndRun(String[] args) { try { super.initializeAndRun(args); } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) { throw new RuntimeException("Exception when initializing or running ZooKeeper server", e); } } } }
Yep.
void shutdown() { executorService.shutdownNow(); }
executorService.shutdownNow();
void shutdown() { executorService.shutdown(); try { executorService.awaitTermination(10000, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.log(Level.INFO, "Interrupted waiting for executor to complete", e); } if ( ! executorService.isTerminated()) { executorService.shutdownNow(); } }
class ZooKeeperRunner implements Runnable { private static final Logger log = java.util.logging.Logger.getLogger(ZooKeeperRunner.class.getName()); private final ExecutorService executorService; private final ZookeeperServerConfig zookeeperServerConfig; public ZooKeeperRunner(ZookeeperServerConfig zookeeperServerConfig) { this.zookeeperServerConfig = zookeeperServerConfig; new Configurator(zookeeperServerConfig).writeConfigToDisk(TransportSecurityUtils.getOptions()); executorService = Executors.newSingleThreadExecutor(new DaemonThreadFactory("zookeeper server")); executorService.submit(this); } @Override public void run() { String[] args = new String[]{getDefaults().underVespaHome(zookeeperServerConfig.zooKeeperConfigFile())}; log.log(Level.INFO, "Starting ZooKeeper server with config file " + args[0] + ". Trying to establish ZooKeeper quorum (members: " + zookeeperServerHostnames(zookeeperServerConfig) + ")"); new Server().initializeAndRun(args); } /** * Extends QuoroumPeerMain to be able to call initializeAndRun() */ private static class Server extends QuorumPeerMain { @Override protected void initializeAndRun(String[] args) { try { super.initializeAndRun(args); } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) { throw new RuntimeException("Exception when initializing or running ZooKeeper server", e); } } } }
class ZooKeeperRunner implements Runnable { private static final Logger log = java.util.logging.Logger.getLogger(ZooKeeperRunner.class.getName()); private final ExecutorService executorService; private final ZookeeperServerConfig zookeeperServerConfig; public ZooKeeperRunner(ZookeeperServerConfig zookeeperServerConfig) { this.zookeeperServerConfig = zookeeperServerConfig; new Configurator(zookeeperServerConfig).writeConfigToDisk(TransportSecurityUtils.getOptions()); executorService = Executors.newSingleThreadExecutor(new DaemonThreadFactory("zookeeper server")); executorService.submit(this); } @Override public void run() { String[] args = new String[]{getDefaults().underVespaHome(zookeeperServerConfig.zooKeeperConfigFile())}; log.log(Level.INFO, "Starting ZooKeeper server with config file " + args[0] + ". Trying to establish ZooKeeper quorum (members: " + zookeeperServerHostnames(zookeeperServerConfig) + ")"); new Server().initializeAndRun(args); } /** * Extends QuoroumPeerMain to be able to call initializeAndRun() */ private static class Server extends QuorumPeerMain { @Override protected void initializeAndRun(String[] args) { try { super.initializeAndRun(args); } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) { throw new RuntimeException("Exception when initializing or running ZooKeeper server", e); } } } }
== should be used here?
private static void validate(List<String> owners, Instant createdAt, Instant expiresAt) { if (expiresAt.isBefore(createdAt)) { throw new IllegalArgumentException( String.format( "Flag cannot expire before its creation date (createdAt='%s', expiresAt='%s')", createdAt, expiresAt)); } if (owners.equals(PermanentFlags.OWNERS)) { if (!createdAt.equals(PermanentFlags.CREATED_AT) || !expiresAt.equals(PermanentFlags.EXPIRES_AT)) { throw new IllegalArgumentException("Invalid creation or expiration date for permanent flag"); } } else if (owners.isEmpty()) { throw new IllegalArgumentException("Owner(s) must be specified"); } }
if (owners.equals(PermanentFlags.OWNERS)) {
private static void validate(List<String> owners, Instant createdAt, Instant expiresAt) { if (expiresAt.isBefore(createdAt)) { throw new IllegalArgumentException( String.format( "Flag cannot expire before its creation date (createdAt='%s', expiresAt='%s')", createdAt, expiresAt)); } if (owners == PermanentFlags.OWNERS) { if (!createdAt.equals(PermanentFlags.CREATED_AT) || !expiresAt.equals(PermanentFlags.EXPIRES_AT)) { throw new IllegalArgumentException("Invalid creation or expiration date for permanent flag"); } } else if (owners.isEmpty()) { throw new IllegalArgumentException("Owner(s) must be specified"); } }
class FlagDefinition { private final UnboundFlag<?, ?, ?> unboundFlag; private final List<String> owners; private final Instant createdAt; private final Instant expiresAt; private final String description; private final String modificationEffect; private final List<FetchVector.Dimension> dimensions; public FlagDefinition( UnboundFlag<?, ?, ?> unboundFlag, List<String> owners, Instant createdAt, Instant expiresAt, String description, String modificationEffect, FetchVector.Dimension... dimensions) { validate(owners, createdAt, expiresAt); this.unboundFlag = unboundFlag; this.owners = owners; this.createdAt = createdAt; this.expiresAt = expiresAt; this.description = description; this.modificationEffect = modificationEffect; this.dimensions = Collections.unmodifiableList(Arrays.asList(dimensions)); } public UnboundFlag<?, ?, ?> getUnboundFlag() { return unboundFlag; } public List<FetchVector.Dimension> getDimensions() { return dimensions; } public String getDescription() { return description; } public String getModificationEffect() { return modificationEffect; } public List<String> getOwners() { return owners; } public Instant getCreatedAt() { return createdAt; } public Instant getExpiresAt() { return expiresAt; } }
class FlagDefinition { private final UnboundFlag<?, ?, ?> unboundFlag; private final List<String> owners; private final Instant createdAt; private final Instant expiresAt; private final String description; private final String modificationEffect; private final List<FetchVector.Dimension> dimensions; public FlagDefinition( UnboundFlag<?, ?, ?> unboundFlag, List<String> owners, Instant createdAt, Instant expiresAt, String description, String modificationEffect, FetchVector.Dimension... dimensions) { validate(owners, createdAt, expiresAt); this.unboundFlag = unboundFlag; this.owners = owners; this.createdAt = createdAt; this.expiresAt = expiresAt; this.description = description; this.modificationEffect = modificationEffect; this.dimensions = Collections.unmodifiableList(Arrays.asList(dimensions)); } public UnboundFlag<?, ?, ?> getUnboundFlag() { return unboundFlag; } public List<FetchVector.Dimension> getDimensions() { return dimensions; } public String getDescription() { return description; } public String getModificationEffect() { return modificationEffect; } public List<String> getOwners() { return owners; } public Instant getCreatedAt() { return createdAt; } public Instant getExpiresAt() { return expiresAt; } }
I don't like that for anything except enums. Defensive copying of collections in constructors is a common pattern in our code base. That would break a reference equality check.
private static void validate(List<String> owners, Instant createdAt, Instant expiresAt) { if (expiresAt.isBefore(createdAt)) { throw new IllegalArgumentException( String.format( "Flag cannot expire before its creation date (createdAt='%s', expiresAt='%s')", createdAt, expiresAt)); } if (owners.equals(PermanentFlags.OWNERS)) { if (!createdAt.equals(PermanentFlags.CREATED_AT) || !expiresAt.equals(PermanentFlags.EXPIRES_AT)) { throw new IllegalArgumentException("Invalid creation or expiration date for permanent flag"); } } else if (owners.isEmpty()) { throw new IllegalArgumentException("Owner(s) must be specified"); } }
if (owners.equals(PermanentFlags.OWNERS)) {
private static void validate(List<String> owners, Instant createdAt, Instant expiresAt) { if (expiresAt.isBefore(createdAt)) { throw new IllegalArgumentException( String.format( "Flag cannot expire before its creation date (createdAt='%s', expiresAt='%s')", createdAt, expiresAt)); } if (owners == PermanentFlags.OWNERS) { if (!createdAt.equals(PermanentFlags.CREATED_AT) || !expiresAt.equals(PermanentFlags.EXPIRES_AT)) { throw new IllegalArgumentException("Invalid creation or expiration date for permanent flag"); } } else if (owners.isEmpty()) { throw new IllegalArgumentException("Owner(s) must be specified"); } }
class FlagDefinition { private final UnboundFlag<?, ?, ?> unboundFlag; private final List<String> owners; private final Instant createdAt; private final Instant expiresAt; private final String description; private final String modificationEffect; private final List<FetchVector.Dimension> dimensions; public FlagDefinition( UnboundFlag<?, ?, ?> unboundFlag, List<String> owners, Instant createdAt, Instant expiresAt, String description, String modificationEffect, FetchVector.Dimension... dimensions) { validate(owners, createdAt, expiresAt); this.unboundFlag = unboundFlag; this.owners = owners; this.createdAt = createdAt; this.expiresAt = expiresAt; this.description = description; this.modificationEffect = modificationEffect; this.dimensions = Collections.unmodifiableList(Arrays.asList(dimensions)); } public UnboundFlag<?, ?, ?> getUnboundFlag() { return unboundFlag; } public List<FetchVector.Dimension> getDimensions() { return dimensions; } public String getDescription() { return description; } public String getModificationEffect() { return modificationEffect; } public List<String> getOwners() { return owners; } public Instant getCreatedAt() { return createdAt; } public Instant getExpiresAt() { return expiresAt; } }
class FlagDefinition { private final UnboundFlag<?, ?, ?> unboundFlag; private final List<String> owners; private final Instant createdAt; private final Instant expiresAt; private final String description; private final String modificationEffect; private final List<FetchVector.Dimension> dimensions; public FlagDefinition( UnboundFlag<?, ?, ?> unboundFlag, List<String> owners, Instant createdAt, Instant expiresAt, String description, String modificationEffect, FetchVector.Dimension... dimensions) { validate(owners, createdAt, expiresAt); this.unboundFlag = unboundFlag; this.owners = owners; this.createdAt = createdAt; this.expiresAt = expiresAt; this.description = description; this.modificationEffect = modificationEffect; this.dimensions = Collections.unmodifiableList(Arrays.asList(dimensions)); } public UnboundFlag<?, ?, ?> getUnboundFlag() { return unboundFlag; } public List<FetchVector.Dimension> getDimensions() { return dimensions; } public String getDescription() { return description; } public String getModificationEffect() { return modificationEffect; } public List<String> getOwners() { return owners; } public Instant getCreatedAt() { return createdAt; } public Instant getExpiresAt() { return expiresAt; } }
As it is now, the below if-block is unreachable because of this, since if `owners.equals(PermanentFlags.OWNERS)` then `owners.isEmpty()` is true. This code should instead ask PermanentFlags if the Flag is owned by PermanentFlags, e.g. testing for reference-equals with a private static OWNERS, and then that class should validate the Flag instead of here.
private static void validate(List<String> owners, Instant createdAt, Instant expiresAt) { if (expiresAt.isBefore(createdAt)) { throw new IllegalArgumentException( String.format( "Flag cannot expire before its creation date (createdAt='%s', expiresAt='%s')", createdAt, expiresAt)); } if (owners.equals(PermanentFlags.OWNERS)) { if (!createdAt.equals(PermanentFlags.CREATED_AT) || !expiresAt.equals(PermanentFlags.EXPIRES_AT)) { throw new IllegalArgumentException("Invalid creation or expiration date for permanent flag"); } } else if (owners.isEmpty()) { throw new IllegalArgumentException("Owner(s) must be specified"); } }
if (owners.equals(PermanentFlags.OWNERS)) {
private static void validate(List<String> owners, Instant createdAt, Instant expiresAt) { if (expiresAt.isBefore(createdAt)) { throw new IllegalArgumentException( String.format( "Flag cannot expire before its creation date (createdAt='%s', expiresAt='%s')", createdAt, expiresAt)); } if (owners == PermanentFlags.OWNERS) { if (!createdAt.equals(PermanentFlags.CREATED_AT) || !expiresAt.equals(PermanentFlags.EXPIRES_AT)) { throw new IllegalArgumentException("Invalid creation or expiration date for permanent flag"); } } else if (owners.isEmpty()) { throw new IllegalArgumentException("Owner(s) must be specified"); } }
class FlagDefinition { private final UnboundFlag<?, ?, ?> unboundFlag; private final List<String> owners; private final Instant createdAt; private final Instant expiresAt; private final String description; private final String modificationEffect; private final List<FetchVector.Dimension> dimensions; public FlagDefinition( UnboundFlag<?, ?, ?> unboundFlag, List<String> owners, Instant createdAt, Instant expiresAt, String description, String modificationEffect, FetchVector.Dimension... dimensions) { validate(owners, createdAt, expiresAt); this.unboundFlag = unboundFlag; this.owners = owners; this.createdAt = createdAt; this.expiresAt = expiresAt; this.description = description; this.modificationEffect = modificationEffect; this.dimensions = Collections.unmodifiableList(Arrays.asList(dimensions)); } public UnboundFlag<?, ?, ?> getUnboundFlag() { return unboundFlag; } public List<FetchVector.Dimension> getDimensions() { return dimensions; } public String getDescription() { return description; } public String getModificationEffect() { return modificationEffect; } public List<String> getOwners() { return owners; } public Instant getCreatedAt() { return createdAt; } public Instant getExpiresAt() { return expiresAt; } }
class FlagDefinition { private final UnboundFlag<?, ?, ?> unboundFlag; private final List<String> owners; private final Instant createdAt; private final Instant expiresAt; private final String description; private final String modificationEffect; private final List<FetchVector.Dimension> dimensions; public FlagDefinition( UnboundFlag<?, ?, ?> unboundFlag, List<String> owners, Instant createdAt, Instant expiresAt, String description, String modificationEffect, FetchVector.Dimension... dimensions) { validate(owners, createdAt, expiresAt); this.unboundFlag = unboundFlag; this.owners = owners; this.createdAt = createdAt; this.expiresAt = expiresAt; this.description = description; this.modificationEffect = modificationEffect; this.dimensions = Collections.unmodifiableList(Arrays.asList(dimensions)); } public UnboundFlag<?, ?, ?> getUnboundFlag() { return unboundFlag; } public List<FetchVector.Dimension> getDimensions() { return dimensions; } public String getDescription() { return description; } public String getModificationEffect() { return modificationEffect; } public List<String> getOwners() { return owners; } public Instant getCreatedAt() { return createdAt; } public Instant getExpiresAt() { return expiresAt; } }
I agree, the code was confusing. I changed line 70 to use your suggestion to check if flag originates from `PermanentFlags`.
private static void validate(List<String> owners, Instant createdAt, Instant expiresAt) { if (expiresAt.isBefore(createdAt)) { throw new IllegalArgumentException( String.format( "Flag cannot expire before its creation date (createdAt='%s', expiresAt='%s')", createdAt, expiresAt)); } if (owners.equals(PermanentFlags.OWNERS)) { if (!createdAt.equals(PermanentFlags.CREATED_AT) || !expiresAt.equals(PermanentFlags.EXPIRES_AT)) { throw new IllegalArgumentException("Invalid creation or expiration date for permanent flag"); } } else if (owners.isEmpty()) { throw new IllegalArgumentException("Owner(s) must be specified"); } }
if (owners.equals(PermanentFlags.OWNERS)) {
private static void validate(List<String> owners, Instant createdAt, Instant expiresAt) { if (expiresAt.isBefore(createdAt)) { throw new IllegalArgumentException( String.format( "Flag cannot expire before its creation date (createdAt='%s', expiresAt='%s')", createdAt, expiresAt)); } if (owners == PermanentFlags.OWNERS) { if (!createdAt.equals(PermanentFlags.CREATED_AT) || !expiresAt.equals(PermanentFlags.EXPIRES_AT)) { throw new IllegalArgumentException("Invalid creation or expiration date for permanent flag"); } } else if (owners.isEmpty()) { throw new IllegalArgumentException("Owner(s) must be specified"); } }
class FlagDefinition { private final UnboundFlag<?, ?, ?> unboundFlag; private final List<String> owners; private final Instant createdAt; private final Instant expiresAt; private final String description; private final String modificationEffect; private final List<FetchVector.Dimension> dimensions; public FlagDefinition( UnboundFlag<?, ?, ?> unboundFlag, List<String> owners, Instant createdAt, Instant expiresAt, String description, String modificationEffect, FetchVector.Dimension... dimensions) { validate(owners, createdAt, expiresAt); this.unboundFlag = unboundFlag; this.owners = owners; this.createdAt = createdAt; this.expiresAt = expiresAt; this.description = description; this.modificationEffect = modificationEffect; this.dimensions = Collections.unmodifiableList(Arrays.asList(dimensions)); } public UnboundFlag<?, ?, ?> getUnboundFlag() { return unboundFlag; } public List<FetchVector.Dimension> getDimensions() { return dimensions; } public String getDescription() { return description; } public String getModificationEffect() { return modificationEffect; } public List<String> getOwners() { return owners; } public Instant getCreatedAt() { return createdAt; } public Instant getExpiresAt() { return expiresAt; } }
class FlagDefinition { private final UnboundFlag<?, ?, ?> unboundFlag; private final List<String> owners; private final Instant createdAt; private final Instant expiresAt; private final String description; private final String modificationEffect; private final List<FetchVector.Dimension> dimensions; public FlagDefinition( UnboundFlag<?, ?, ?> unboundFlag, List<String> owners, Instant createdAt, Instant expiresAt, String description, String modificationEffect, FetchVector.Dimension... dimensions) { validate(owners, createdAt, expiresAt); this.unboundFlag = unboundFlag; this.owners = owners; this.createdAt = createdAt; this.expiresAt = expiresAt; this.description = description; this.modificationEffect = modificationEffect; this.dimensions = Collections.unmodifiableList(Arrays.asList(dimensions)); } public UnboundFlag<?, ?, ?> getUnboundFlag() { return unboundFlag; } public List<FetchVector.Dimension> getDimensions() { return dimensions; } public String getDescription() { return description; } public String getModificationEffect() { return modificationEffect; } public List<String> getOwners() { return owners; } public Instant getCreatedAt() { return createdAt; } public Instant getExpiresAt() { return expiresAt; } }
This is probably not intended to be included
public void processMessage(Message message) { super.processMessage(message); if (++numSent < windowSize * resizeRate) { return; } long time = timer.milliTime(); double elapsed = time - resizeTime; resizeTime = time; double throughput = numOk / elapsed; numSent = 0; numOk = 0; if (maxThroughput > 0 && throughput > maxThroughput * 0.95) { } else if (throughput > localMaxThroughput) { localMaxThroughput = throughput; windowSize += weight * windowSizeIncrement; if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput); } } else { double period = 1; while(throughput * period / windowSize < 2) { period *= 10; } while(throughput * period / windowSize > 2) { period *= 0.1; } double efficiency = throughput * period / windowSize; if (Math.random() < 1e-2) System.err.println(efficiency); if (efficiency < efficiencyThreshold) { windowSize = Math.min(windowSize * windowSizeBackOff, windowSize - decrementFactor * windowSizeIncrement); localMaxThroughput = 0; } else { windowSize += weight * windowSizeIncrement; } if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput + " efficiency " + efficiency); } } windowSize = Math.max(minWindowSize, windowSize); windowSize = Math.min(maxWindowSize, windowSize); }
if (Math.random() < 1e-2) System.err.println(efficiency);
public void processMessage(Message message) { super.processMessage(message); if (++numSent < windowSize * resizeRate) { return; } long time = timer.milliTime(); double elapsed = time - resizeTime; resizeTime = time; double throughput = numOk / elapsed; numSent = 0; numOk = 0; if (maxThroughput > 0 && throughput > maxThroughput * 0.95) { } else if (throughput > localMaxThroughput) { localMaxThroughput = throughput; windowSize += weight * windowSizeIncrement; if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput); } } else { double period = 1; while(throughput * period / windowSize < 2) { period *= 10; } while(throughput * period / windowSize > 2) { period *= 0.1; } double efficiency = throughput * period / windowSize; if (efficiency < efficiencyThreshold) { windowSize = Math.min(windowSize * windowSizeBackOff, windowSize - decrementFactor * windowSizeIncrement); localMaxThroughput = 0; } else { windowSize += weight * windowSizeIncrement; } if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput + " efficiency " + efficiency); } } windowSize = Math.max(minWindowSize, windowSize); windowSize = Math.min(maxWindowSize, windowSize); }
class using the given clock to calculate efficiency. * * @param timer the timer to use */ public DynamicThrottlePolicy(Timer timer) { this.timer = timer; this.timeOfLastMessage = timer.milliTime(); }
class using the given clock to calculate efficiency. * * @param timer the timer to use */ public DynamicThrottlePolicy(Timer timer) { this.timer = timer; this.timeOfLastMessage = timer.milliTime(); }
Absolutely not.
public void processMessage(Message message) { super.processMessage(message); if (++numSent < windowSize * resizeRate) { return; } long time = timer.milliTime(); double elapsed = time - resizeTime; resizeTime = time; double throughput = numOk / elapsed; numSent = 0; numOk = 0; if (maxThroughput > 0 && throughput > maxThroughput * 0.95) { } else if (throughput > localMaxThroughput) { localMaxThroughput = throughput; windowSize += weight * windowSizeIncrement; if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput); } } else { double period = 1; while(throughput * period / windowSize < 2) { period *= 10; } while(throughput * period / windowSize > 2) { period *= 0.1; } double efficiency = throughput * period / windowSize; if (Math.random() < 1e-2) System.err.println(efficiency); if (efficiency < efficiencyThreshold) { windowSize = Math.min(windowSize * windowSizeBackOff, windowSize - decrementFactor * windowSizeIncrement); localMaxThroughput = 0; } else { windowSize += weight * windowSizeIncrement; } if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput + " efficiency " + efficiency); } } windowSize = Math.max(minWindowSize, windowSize); windowSize = Math.min(maxWindowSize, windowSize); }
if (Math.random() < 1e-2) System.err.println(efficiency);
public void processMessage(Message message) { super.processMessage(message); if (++numSent < windowSize * resizeRate) { return; } long time = timer.milliTime(); double elapsed = time - resizeTime; resizeTime = time; double throughput = numOk / elapsed; numSent = 0; numOk = 0; if (maxThroughput > 0 && throughput > maxThroughput * 0.95) { } else if (throughput > localMaxThroughput) { localMaxThroughput = throughput; windowSize += weight * windowSizeIncrement; if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput); } } else { double period = 1; while(throughput * period / windowSize < 2) { period *= 10; } while(throughput * period / windowSize > 2) { period *= 0.1; } double efficiency = throughput * period / windowSize; if (efficiency < efficiencyThreshold) { windowSize = Math.min(windowSize * windowSizeBackOff, windowSize - decrementFactor * windowSizeIncrement); localMaxThroughput = 0; } else { windowSize += weight * windowSizeIncrement; } if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput + " efficiency " + efficiency); } } windowSize = Math.max(minWindowSize, windowSize); windowSize = Math.min(maxWindowSize, windowSize); }
class using the given clock to calculate efficiency. * * @param timer the timer to use */ public DynamicThrottlePolicy(Timer timer) { this.timer = timer; this.timeOfLastMessage = timer.milliTime(); }
class using the given clock to calculate efficiency. * * @param timer the timer to use */ public DynamicThrottlePolicy(Timer timer) { this.timer = timer; this.timeOfLastMessage = timer.milliTime(); }
Consider simplifying query to only contain _mytensor2_.
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor1%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution);
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=mytensor1:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=mytensor2:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
class QueryValidatorTestCase { @Test }
class QueryValidatorTestCase { @Test }
```suggestion ```
public void processMessage(Message message) { super.processMessage(message); if (++numSent < windowSize * resizeRate) { return; } long time = timer.milliTime(); double elapsed = time - resizeTime; resizeTime = time; double throughput = numOk / elapsed; numSent = 0; numOk = 0; if (maxThroughput > 0 && throughput > maxThroughput * 0.95) { } else if (throughput > localMaxThroughput) { localMaxThroughput = throughput; windowSize += weight * windowSizeIncrement; if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput); } } else { double period = 1; while(throughput * period / windowSize < 2) { period *= 10; } while(throughput * period / windowSize > 2) { period *= 0.1; } double efficiency = throughput * period / windowSize; if (Math.random() < 1e-2) System.err.println(efficiency); if (efficiency < efficiencyThreshold) { windowSize = Math.min(windowSize * windowSizeBackOff, windowSize - decrementFactor * windowSizeIncrement); localMaxThroughput = 0; } else { windowSize += weight * windowSizeIncrement; } if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput + " efficiency " + efficiency); } } windowSize = Math.max(minWindowSize, windowSize); windowSize = Math.min(maxWindowSize, windowSize); }
if (Math.random() < 1e-2) System.err.println(efficiency);
public void processMessage(Message message) { super.processMessage(message); if (++numSent < windowSize * resizeRate) { return; } long time = timer.milliTime(); double elapsed = time - resizeTime; resizeTime = time; double throughput = numOk / elapsed; numSent = 0; numOk = 0; if (maxThroughput > 0 && throughput > maxThroughput * 0.95) { } else if (throughput > localMaxThroughput) { localMaxThroughput = throughput; windowSize += weight * windowSizeIncrement; if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput); } } else { double period = 1; while(throughput * period / windowSize < 2) { period *= 10; } while(throughput * period / windowSize > 2) { period *= 0.1; } double efficiency = throughput * period / windowSize; if (efficiency < efficiencyThreshold) { windowSize = Math.min(windowSize * windowSizeBackOff, windowSize - decrementFactor * windowSizeIncrement); localMaxThroughput = 0; } else { windowSize += weight * windowSizeIncrement; } if (log.isLoggable(Level.FINE)) { log.log(Level.FINE, "windowSize " + windowSize + " throughput " + throughput + " local max " + localMaxThroughput + " efficiency " + efficiency); } } windowSize = Math.max(minWindowSize, windowSize); windowSize = Math.min(maxWindowSize, windowSize); }
class using the given clock to calculate efficiency. * * @param timer the timer to use */ public DynamicThrottlePolicy(Timer timer) { this.timer = timer; this.timeOfLastMessage = timer.milliTime(); }
class using the given clock to calculate efficiency. * * @param timer the timer to use */ public DynamicThrottlePolicy(Timer timer) { this.timer = timer; this.timeOfLastMessage = timer.milliTime(); }
Could also limit this to `yahoo` cloud zones...
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); int nodesUpdated = 0; try { for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(node.hostname().value()); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().get()); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); nodesUpdated++; } } } finally { if (nodesUpdated > 0) { log.info("Updated switch hostname for " + nodesUpdated + " node(s)"); } } return true; }
for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) {
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); int nodesUpdated = 0; try { for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(registeredHostnameOf(node)); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().get()); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); nodesUpdated++; } } } finally { if (nodesUpdated > 0) { LOG.info("Updated switch hostname for " + nodesUpdated + " node(s)"); } } return true; }
class HostSwitchUpdater extends ControllerMaintainer { private static final Logger log = Logger.getLogger(HostSwitchUpdater.class.getName()); private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval, null, EnumSet.of(SystemName.cd, SystemName.main)); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; if (nodeEntity.switchHostname().isEmpty()) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
class HostSwitchUpdater extends ControllerMaintainer { private static final Logger LOG = Logger.getLogger(HostSwitchUpdater.class.getName()); private static final Pattern HOST_PATTERN = Pattern.compile("^(proxy|cfg|controller)host(.+)$"); private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval, null, EnumSet.of(SystemName.cd, SystemName.main)); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override /** Returns the hostname that given host is registered under in the {@link EntityService} */ private static String registeredHostnameOf(Node host) { String hostname = host.hostname().value(); if (!host.type().isHost()) return hostname; Matcher matcher = HOST_PATTERN.matcher(hostname); if (!matcher.matches()) return hostname; return matcher.replaceFirst("$1$2"); } private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; if (nodeEntity.switchHostname().isEmpty()) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
Yes, but that's not available here so would have to use string. 😞
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); int nodesUpdated = 0; try { for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(node.hostname().value()); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().get()); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); nodesUpdated++; } } } finally { if (nodesUpdated > 0) { log.info("Updated switch hostname for " + nodesUpdated + " node(s)"); } } return true; }
for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) {
protected boolean maintain() { Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream() .collect(Collectors.toMap(NodeEntity::hostname, Function.identity())); int nodesUpdated = 0; try { for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) { for (var node : nodeRepository.list(zone)) { NodeEntity nodeEntity = nodeEntities.get(registeredHostnameOf(node)); if (!shouldUpdate(node, nodeEntity)) continue; NodeRepositoryNode updatedNode = new NodeRepositoryNode(); updatedNode.setSwitchHostname(nodeEntity.switchHostname().get()); nodeRepository.patchNode(zone, node.hostname().value(), updatedNode); nodesUpdated++; } } } finally { if (nodesUpdated > 0) { LOG.info("Updated switch hostname for " + nodesUpdated + " node(s)"); } } return true; }
class HostSwitchUpdater extends ControllerMaintainer { private static final Logger log = Logger.getLogger(HostSwitchUpdater.class.getName()); private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval, null, EnumSet.of(SystemName.cd, SystemName.main)); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; if (nodeEntity.switchHostname().isEmpty()) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
class HostSwitchUpdater extends ControllerMaintainer { private static final Logger LOG = Logger.getLogger(HostSwitchUpdater.class.getName()); private static final Pattern HOST_PATTERN = Pattern.compile("^(proxy|cfg|controller)host(.+)$"); private final NodeRepository nodeRepository; public HostSwitchUpdater(Controller controller, Duration interval) { super(controller, interval, null, EnumSet.of(SystemName.cd, SystemName.main)); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override /** Returns the hostname that given host is registered under in the {@link EntityService} */ private static String registeredHostnameOf(Node host) { String hostname = host.hostname().value(); if (!host.type().isHost()) return hostname; Matcher matcher = HOST_PATTERN.matcher(hostname); if (!matcher.matches()) return hostname; return matcher.replaceFirst("$1$2"); } private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) { if (nodeEntity == null) return false; if (nodeEntity.switchHostname().isEmpty()) return false; return !node.switchHostname().equals(nodeEntity.switchHostname()); } }
Consider simplifying query to only contain _mytensor1_.
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor1%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor1%3A%3E0"), execution);
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=mytensor1:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=mytensor2:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
class QueryValidatorTestCase { @Test }
class QueryValidatorTestCase { @Test }
👍
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor1%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution);
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=mytensor1:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=mytensor2:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
class QueryValidatorTestCase { @Test }
class QueryValidatorTestCase { @Test }
done, thanks
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor1%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor1%3A%3E0"), execution);
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=mytensor1:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=mytensor2:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
class QueryValidatorTestCase { @Test }
class QueryValidatorTestCase { @Test }
done, thanks
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor1%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution);
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=mytensor1:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=mytensor2:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
class QueryValidatorTestCase { @Test }
class QueryValidatorTestCase { @Test }
Reason for complicated query perhaps to verify nothing's lost?
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor1%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
new QueryValidator().search(new Query("?query=sddocname%3Aproduct%20lfmModel25KeysV0%3A9%2A%20mytensor2%3A%3E0"), execution);
public void testValidation() { SearchDefinition sd = new SearchDefinition("test"); sd.addCommand("mytensor1", "type tensor(x[100]"); sd.addCommand("mytensor2", "type tensor<float>(x[100]"); sd.addCommand("mystring", "type string"); IndexModel model = new IndexModel(sd); IndexFacts indexFacts = new IndexFacts(model); Execution execution = new Execution(Execution.Context.createContextStub(indexFacts)); new QueryValidator().search(new Query("?query=mystring:foo"), execution); try { new QueryValidator().search(new Query("?query=mytensor1:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor1': It is a tensor field", e.getMessage()); } try { new QueryValidator().search(new Query("?query=mytensor2:foo"), execution); fail("Expected validation error"); } catch (IllegalArgumentException e) { assertEquals("Cannot search 'mytensor2': It is a tensor field", e.getMessage()); } }
class QueryValidatorTestCase { @Test }
class QueryValidatorTestCase { @Test }
`>= 0`
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { Reindexing.State state = status.state(); metric.set("reindexing.progress", status.progress().map(ProgressToken::percentFinished).map(percentage -> percentage * 1e-2) .orElse(status.state() == SUCCESSFUL ? 1.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(state)))); for (Reindexing.State unset : EnumSet.complementOf(EnumSet.of(state))) metric.set("reindexing.progress", -1, metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(unset)))); }); }
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { Reindexing.State state = status.state(); metric.set("reindexing.progress", status.progress().map(ProgressToken::percentFinished).map(percentage -> percentage * 1e-2) .orElse(status.state() == SUCCESSFUL ? 1.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(state)))); for (Reindexing.State unset : EnumSet.complementOf(EnumSet.of(state))) metric.set("reindexing.progress", -1, metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(unset)))); }); }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "pending"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "pending"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
```suggestion // Set metric value to -1 for all states not currently active, so we only have one value >= 0 at any given time. ```
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { Reindexing.State state = status.state(); metric.set("reindexing.progress", status.progress().map(ProgressToken::percentFinished).map(percentage -> percentage * 1e-2) .orElse(status.state() == SUCCESSFUL ? 1.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(state)))); for (Reindexing.State unset : EnumSet.complementOf(EnumSet.of(state))) metric.set("reindexing.progress", -1, metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(unset)))); }); }
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { Reindexing.State state = status.state(); metric.set("reindexing.progress", status.progress().map(ProgressToken::percentFinished).map(percentage -> percentage * 1e-2) .orElse(status.state() == SUCCESSFUL ? 1.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(state)))); for (Reindexing.State unset : EnumSet.complementOf(EnumSet.of(state))) metric.set("reindexing.progress", -1, metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(unset)))); }); }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "pending"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "pending"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
How do we know when data distribution has to completed if we add new nodes? Since we know from the event what the target is, wouldn't it be better to identify the nodes that we are scaling to (i.e. the `count` nodes that are `active`, not retired and have the same resources as the `cluster.targetResources()`), then for each of those check with cluster controller if the node is in sync?
private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) { if (cluster.lastScalingEvent().isEmpty()) return cluster; var event = cluster.lastScalingEvent().get(); if (event.completion().isPresent()) return cluster; if (clusterNodes.retired().stream() .anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at()))) return cluster; for (NodeTimeseries nodeTimeseries : metricsDb.getNodeTimeseries(event.at(), clusterNodes)) { Optional<MetricSnapshot> firstOnNewGeneration = nodeTimeseries.asList().stream() .filter(snapshot -> snapshot.generation() >= event.generation()).findFirst(); if (firstOnNewGeneration.isEmpty()) return cluster; } Instant completionTime = nodeRepository().clock().instant(); return cluster.with(event.withCompletion(completionTime)); }
private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) { if (cluster.lastScalingEvent().isEmpty()) return cluster; var event = cluster.lastScalingEvent().get(); if (event.completion().isPresent()) return cluster; if (clusterNodes.retired().stream() .anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at()))) return cluster; for (NodeTimeseries nodeTimeseries : metricsDb.getNodeTimeseries(event.at(), clusterNodes)) { Optional<MetricSnapshot> firstOnNewGeneration = nodeTimeseries.asList().stream() .filter(snapshot -> snapshot.generation() >= event.generation()).findFirst(); if (firstOnNewGeneration.isEmpty()) return cluster; } Instant completionTime = nodeRepository().clock().instant(); return cluster.with(event.withCompletion(completionTime)); }
class AutoscalingMaintainer extends NodeRepositoryMaintainer { private final Autoscaler autoscaler; private final MetricsDb metricsDb; private final Deployer deployer; private final Metric metric; public AutoscalingMaintainer(NodeRepository nodeRepository, MetricsDb metricsDb, Deployer deployer, Metric metric, Duration interval) { super(nodeRepository, interval, metric); this.autoscaler = new Autoscaler(metricsDb, nodeRepository); this.metricsDb = metricsDb; this.deployer = deployer; this.metric = metric; } @Override protected boolean maintain() { if ( ! nodeRepository().isWorking()) return false; boolean success = true; if ( ! nodeRepository().zone().environment().isProduction()) return success; activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes)); return success; } private void autoscale(ApplicationId application, List<Node> applicationNodes) { try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) { if ( ! 
deployment.isValid()) return; nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, NodeList.copyOf(clusterNodes), deployment)); } } private void autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, NodeList clusterNodes, MaintenanceDeployment deployment) { Application application = nodeRepository().applications().get(applicationId).orElse(new Application(applicationId)); if (application.cluster(clusterId).isEmpty()) return; Cluster cluster = application.cluster(clusterId).get(); cluster = updateCompletion(cluster, clusterNodes); var advice = autoscaler.autoscale(cluster, clusterNodes); cluster = cluster.withAutoscalingStatus(advice.reason()); if (advice.isEmpty()) { applications().put(application.with(cluster), deployment.applicationLock().get()); } else if (!cluster.targetResources().equals(advice.target())) { applications().put(application.with(cluster.withTarget(advice.target())), deployment.applicationLock().get()); if (advice.target().isPresent()) { logAutoscaling(advice.target().get(), applicationId, cluster, clusterNodes); deployment.activate(); } } } private Applications applications() { return nodeRepository().applications(); } /** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */ private void logAutoscaling(ClusterResources target, ApplicationId application, Cluster cluster, NodeList clusterNodes) { ClusterResources current = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository(), cluster.exclusive()).toAdvertisedClusterResources(); log.info("Autoscaling " + application + " " + clusterNodes.clusterSpec() + ":" + "\nfrom " + toString(current) + "\nto " + toString(target)); } static String toString(ClusterResources r) { return r + " (total: " + r.totalResources() + ")"; } private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) { return 
applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id())); } }
class AutoscalingMaintainer extends NodeRepositoryMaintainer { private final Autoscaler autoscaler; private final MetricsDb metricsDb; private final Deployer deployer; private final Metric metric; public AutoscalingMaintainer(NodeRepository nodeRepository, MetricsDb metricsDb, Deployer deployer, Metric metric, Duration interval) { super(nodeRepository, interval, metric); this.autoscaler = new Autoscaler(metricsDb, nodeRepository); this.metricsDb = metricsDb; this.deployer = deployer; this.metric = metric; } @Override protected boolean maintain() { if ( ! nodeRepository().isWorking()) return false; boolean success = true; if ( ! nodeRepository().zone().environment().isProduction()) return success; activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes)); return success; } private void autoscale(ApplicationId application, List<Node> applicationNodes) { try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) { if ( ! 
deployment.isValid()) return; nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, NodeList.copyOf(clusterNodes), deployment)); } } private void autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, NodeList clusterNodes, MaintenanceDeployment deployment) { Application application = nodeRepository().applications().get(applicationId).orElse(new Application(applicationId)); if (application.cluster(clusterId).isEmpty()) return; Cluster cluster = application.cluster(clusterId).get(); cluster = updateCompletion(cluster, clusterNodes); var advice = autoscaler.autoscale(cluster, clusterNodes); cluster = cluster.withAutoscalingStatus(advice.reason()); if (advice.isEmpty()) { applications().put(application.with(cluster), deployment.applicationLock().get()); } else if (!cluster.targetResources().equals(advice.target())) { applications().put(application.with(cluster.withTarget(advice.target())), deployment.applicationLock().get()); if (advice.target().isPresent()) { logAutoscaling(advice.target().get(), applicationId, cluster, clusterNodes); deployment.activate(); } } } private Applications applications() { return nodeRepository().applications(); } /** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */ private void logAutoscaling(ClusterResources target, ApplicationId application, Cluster cluster, NodeList clusterNodes) { ClusterResources current = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository(), cluster.exclusive()).toAdvertisedClusterResources(); log.info("Autoscaling " + application + " " + clusterNodes.clusterSpec() + ":" + "\nfrom " + toString(current) + "\nto " + toString(target)); } static String toString(ClusterResources r) { return r + " (total: " + r.totalResources() + ")"; } private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) { return 
applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id())); } }
Yes, you are right; in that case we need to talk to the cluster controller. Not everything else is in place for content clusters yet either: They don't send the generation metric, and some times apply the new generation too quickly.
private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) { if (cluster.lastScalingEvent().isEmpty()) return cluster; var event = cluster.lastScalingEvent().get(); if (event.completion().isPresent()) return cluster; if (clusterNodes.retired().stream() .anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at()))) return cluster; for (NodeTimeseries nodeTimeseries : metricsDb.getNodeTimeseries(event.at(), clusterNodes)) { Optional<MetricSnapshot> firstOnNewGeneration = nodeTimeseries.asList().stream() .filter(snapshot -> snapshot.generation() >= event.generation()).findFirst(); if (firstOnNewGeneration.isEmpty()) return cluster; } Instant completionTime = nodeRepository().clock().instant(); return cluster.with(event.withCompletion(completionTime)); }
private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) { if (cluster.lastScalingEvent().isEmpty()) return cluster; var event = cluster.lastScalingEvent().get(); if (event.completion().isPresent()) return cluster; if (clusterNodes.retired().stream() .anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at()))) return cluster; for (NodeTimeseries nodeTimeseries : metricsDb.getNodeTimeseries(event.at(), clusterNodes)) { Optional<MetricSnapshot> firstOnNewGeneration = nodeTimeseries.asList().stream() .filter(snapshot -> snapshot.generation() >= event.generation()).findFirst(); if (firstOnNewGeneration.isEmpty()) return cluster; } Instant completionTime = nodeRepository().clock().instant(); return cluster.with(event.withCompletion(completionTime)); }
class AutoscalingMaintainer extends NodeRepositoryMaintainer { private final Autoscaler autoscaler; private final MetricsDb metricsDb; private final Deployer deployer; private final Metric metric; public AutoscalingMaintainer(NodeRepository nodeRepository, MetricsDb metricsDb, Deployer deployer, Metric metric, Duration interval) { super(nodeRepository, interval, metric); this.autoscaler = new Autoscaler(metricsDb, nodeRepository); this.metricsDb = metricsDb; this.deployer = deployer; this.metric = metric; } @Override protected boolean maintain() { if ( ! nodeRepository().isWorking()) return false; boolean success = true; if ( ! nodeRepository().zone().environment().isProduction()) return success; activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes)); return success; } private void autoscale(ApplicationId application, List<Node> applicationNodes) { try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) { if ( ! 
deployment.isValid()) return; nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, NodeList.copyOf(clusterNodes), deployment)); } } private void autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, NodeList clusterNodes, MaintenanceDeployment deployment) { Application application = nodeRepository().applications().get(applicationId).orElse(new Application(applicationId)); if (application.cluster(clusterId).isEmpty()) return; Cluster cluster = application.cluster(clusterId).get(); cluster = updateCompletion(cluster, clusterNodes); var advice = autoscaler.autoscale(cluster, clusterNodes); cluster = cluster.withAutoscalingStatus(advice.reason()); if (advice.isEmpty()) { applications().put(application.with(cluster), deployment.applicationLock().get()); } else if (!cluster.targetResources().equals(advice.target())) { applications().put(application.with(cluster.withTarget(advice.target())), deployment.applicationLock().get()); if (advice.target().isPresent()) { logAutoscaling(advice.target().get(), applicationId, cluster, clusterNodes); deployment.activate(); } } } private Applications applications() { return nodeRepository().applications(); } /** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */ private void logAutoscaling(ClusterResources target, ApplicationId application, Cluster cluster, NodeList clusterNodes) { ClusterResources current = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository(), cluster.exclusive()).toAdvertisedClusterResources(); log.info("Autoscaling " + application + " " + clusterNodes.clusterSpec() + ":" + "\nfrom " + toString(current) + "\nto " + toString(target)); } static String toString(ClusterResources r) { return r + " (total: " + r.totalResources() + ")"; } private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) { return 
applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id())); } }
class AutoscalingMaintainer extends NodeRepositoryMaintainer { private final Autoscaler autoscaler; private final MetricsDb metricsDb; private final Deployer deployer; private final Metric metric; public AutoscalingMaintainer(NodeRepository nodeRepository, MetricsDb metricsDb, Deployer deployer, Metric metric, Duration interval) { super(nodeRepository, interval, metric); this.autoscaler = new Autoscaler(metricsDb, nodeRepository); this.metricsDb = metricsDb; this.deployer = deployer; this.metric = metric; } @Override protected boolean maintain() { if ( ! nodeRepository().isWorking()) return false; boolean success = true; if ( ! nodeRepository().zone().environment().isProduction()) return success; activeNodesByApplication().forEach((applicationId, nodes) -> autoscale(applicationId, nodes)); return success; } private void autoscale(ApplicationId application, List<Node> applicationNodes) { try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) { if ( ! 
deployment.isValid()) return; nodesByCluster(applicationNodes).forEach((clusterId, clusterNodes) -> autoscale(application, clusterId, NodeList.copyOf(clusterNodes), deployment)); } } private void autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, NodeList clusterNodes, MaintenanceDeployment deployment) { Application application = nodeRepository().applications().get(applicationId).orElse(new Application(applicationId)); if (application.cluster(clusterId).isEmpty()) return; Cluster cluster = application.cluster(clusterId).get(); cluster = updateCompletion(cluster, clusterNodes); var advice = autoscaler.autoscale(cluster, clusterNodes); cluster = cluster.withAutoscalingStatus(advice.reason()); if (advice.isEmpty()) { applications().put(application.with(cluster), deployment.applicationLock().get()); } else if (!cluster.targetResources().equals(advice.target())) { applications().put(application.with(cluster.withTarget(advice.target())), deployment.applicationLock().get()); if (advice.target().isPresent()) { logAutoscaling(advice.target().get(), applicationId, cluster, clusterNodes); deployment.activate(); } } } private Applications applications() { return nodeRepository().applications(); } /** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */ private void logAutoscaling(ClusterResources target, ApplicationId application, Cluster cluster, NodeList clusterNodes) { ClusterResources current = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository(), cluster.exclusive()).toAdvertisedClusterResources(); log.info("Autoscaling " + application + " " + clusterNodes.clusterSpec() + ":" + "\nfrom " + toString(current) + "\nto " + toString(target)); } static String toString(ClusterResources r) { return r + " (total: " + r.totalResources() + ")"; } private Map<ClusterSpec.Id, List<Node>> nodesByCluster(List<Node> applicationNodes) { return 
applicationNodes.stream().collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster().id())); } }
perhaps featureFlags() should be directly on the context?
public void testModelContextTest() { ContainerEndpoint endpoint = new ContainerEndpoint("foo", List.of("a", "b")); Set<ContainerEndpoint> endpoints = Collections.singleton(endpoint); InMemoryFlagSource flagSource = new InMemoryFlagSource(); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .multitenant(true) .hostedVespa(false) .build(); ModelContext context = new ModelContextImpl( MockApplicationPackage.createEmpty(), Optional.empty(), Optional.empty(), new BaseDeployLogger(), new StaticConfigDefinitionRepo(), new MockFileRegistry(), Optional.empty(), Optional.empty(), new Provisioned(), new ModelContextImpl.Properties( ApplicationId.defaultId(), configserverConfig, Zone.defaultZone(), endpoints, false, false, flagSource, null, Optional.empty(), Optional.empty(), Optional.empty()), Optional.empty(), Optional.empty(), new Version(7), new Version(8)); assertTrue(context.applicationPackage() instanceof MockApplicationPackage); assertFalse(context.hostProvisioner().isPresent()); assertFalse(context.permanentApplicationPackage().isPresent()); assertFalse(context.previousModel().isPresent()); assertTrue(context.getFileRegistry() instanceof MockFileRegistry); assertTrue(context.configDefinitionRepo() instanceof StaticConfigDefinitionRepo); assertThat(context.properties().applicationId(), is(ApplicationId.defaultId())); assertTrue(context.properties().configServerSpecs().isEmpty()); assertTrue(context.properties().multitenant()); assertNotNull(context.properties().zone()); assertFalse(context.properties().hostedVespa()); assertThat(context.properties().endpoints(), equalTo(endpoints)); assertThat(context.properties().isFirstTimeDeployment(), equalTo(false)); assertEquals(Optional.empty(), context.wantedDockerImageRepo()); assertEquals(new Version(7), context.modelVespaVersion()); assertEquals(new Version(8), context.wantedNodeVespaVersion()); assertEquals(1.0, context.properties().featureFlags().defaultTermwiseLimit(), 0.0); 
assertFalse(context.properties().featureFlags().useAsyncMessageHandlingOnSchedule()); assertEquals(0, context.properties().featureFlags().contentNodeBucketDBStripeBits()); assertEquals(0x400000, context.properties().featureFlags().mergeChunkSize()); assertEquals(0.5, context.properties().featureFlags().feedConcurrency(), 0.0); }
assertEquals(1.0, context.properties().featureFlags().defaultTermwiseLimit(), 0.0);
public void testModelContextTest() { ContainerEndpoint endpoint = new ContainerEndpoint("foo", List.of("a", "b")); Set<ContainerEndpoint> endpoints = Collections.singleton(endpoint); InMemoryFlagSource flagSource = new InMemoryFlagSource(); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .multitenant(true) .hostedVespa(false) .build(); ModelContext context = new ModelContextImpl( MockApplicationPackage.createEmpty(), Optional.empty(), Optional.empty(), new BaseDeployLogger(), new StaticConfigDefinitionRepo(), new MockFileRegistry(), Optional.empty(), Optional.empty(), new Provisioned(), new ModelContextImpl.Properties( ApplicationId.defaultId(), configserverConfig, Zone.defaultZone(), endpoints, false, false, flagSource, null, Optional.empty(), Optional.empty(), Optional.empty()), Optional.empty(), Optional.empty(), new Version(7), new Version(8)); assertTrue(context.applicationPackage() instanceof MockApplicationPackage); assertFalse(context.hostProvisioner().isPresent()); assertFalse(context.permanentApplicationPackage().isPresent()); assertFalse(context.previousModel().isPresent()); assertTrue(context.getFileRegistry() instanceof MockFileRegistry); assertTrue(context.configDefinitionRepo() instanceof StaticConfigDefinitionRepo); assertThat(context.properties().applicationId(), is(ApplicationId.defaultId())); assertTrue(context.properties().configServerSpecs().isEmpty()); assertTrue(context.properties().multitenant()); assertNotNull(context.properties().zone()); assertFalse(context.properties().hostedVespa()); assertThat(context.properties().endpoints(), equalTo(endpoints)); assertThat(context.properties().isFirstTimeDeployment(), equalTo(false)); assertEquals(Optional.empty(), context.wantedDockerImageRepo()); assertEquals(new Version(7), context.modelVespaVersion()); assertEquals(new Version(8), context.wantedNodeVespaVersion()); assertEquals(1.0, context.properties().featureFlags().defaultTermwiseLimit(), 0.0); 
assertFalse(context.properties().featureFlags().useAsyncMessageHandlingOnSchedule()); assertEquals(0, context.properties().featureFlags().contentNodeBucketDBStripeBits()); assertEquals(0x400000, context.properties().featureFlags().mergeChunkSize()); assertEquals(0.5, context.properties().featureFlags().feedConcurrency(), 0.0); }
class ModelContextImplTest { @Test }
class ModelContextImplTest { @Test }
That was my original plan, but it turned to out be too clunky to implement.
public void testModelContextTest() { ContainerEndpoint endpoint = new ContainerEndpoint("foo", List.of("a", "b")); Set<ContainerEndpoint> endpoints = Collections.singleton(endpoint); InMemoryFlagSource flagSource = new InMemoryFlagSource(); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .multitenant(true) .hostedVespa(false) .build(); ModelContext context = new ModelContextImpl( MockApplicationPackage.createEmpty(), Optional.empty(), Optional.empty(), new BaseDeployLogger(), new StaticConfigDefinitionRepo(), new MockFileRegistry(), Optional.empty(), Optional.empty(), new Provisioned(), new ModelContextImpl.Properties( ApplicationId.defaultId(), configserverConfig, Zone.defaultZone(), endpoints, false, false, flagSource, null, Optional.empty(), Optional.empty(), Optional.empty()), Optional.empty(), Optional.empty(), new Version(7), new Version(8)); assertTrue(context.applicationPackage() instanceof MockApplicationPackage); assertFalse(context.hostProvisioner().isPresent()); assertFalse(context.permanentApplicationPackage().isPresent()); assertFalse(context.previousModel().isPresent()); assertTrue(context.getFileRegistry() instanceof MockFileRegistry); assertTrue(context.configDefinitionRepo() instanceof StaticConfigDefinitionRepo); assertThat(context.properties().applicationId(), is(ApplicationId.defaultId())); assertTrue(context.properties().configServerSpecs().isEmpty()); assertTrue(context.properties().multitenant()); assertNotNull(context.properties().zone()); assertFalse(context.properties().hostedVespa()); assertThat(context.properties().endpoints(), equalTo(endpoints)); assertThat(context.properties().isFirstTimeDeployment(), equalTo(false)); assertEquals(Optional.empty(), context.wantedDockerImageRepo()); assertEquals(new Version(7), context.modelVespaVersion()); assertEquals(new Version(8), context.wantedNodeVespaVersion()); assertEquals(1.0, context.properties().featureFlags().defaultTermwiseLimit(), 0.0); 
assertFalse(context.properties().featureFlags().useAsyncMessageHandlingOnSchedule()); assertEquals(0, context.properties().featureFlags().contentNodeBucketDBStripeBits()); assertEquals(0x400000, context.properties().featureFlags().mergeChunkSize()); assertEquals(0.5, context.properties().featureFlags().feedConcurrency(), 0.0); }
assertEquals(1.0, context.properties().featureFlags().defaultTermwiseLimit(), 0.0);
public void testModelContextTest() { ContainerEndpoint endpoint = new ContainerEndpoint("foo", List.of("a", "b")); Set<ContainerEndpoint> endpoints = Collections.singleton(endpoint); InMemoryFlagSource flagSource = new InMemoryFlagSource(); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .multitenant(true) .hostedVespa(false) .build(); ModelContext context = new ModelContextImpl( MockApplicationPackage.createEmpty(), Optional.empty(), Optional.empty(), new BaseDeployLogger(), new StaticConfigDefinitionRepo(), new MockFileRegistry(), Optional.empty(), Optional.empty(), new Provisioned(), new ModelContextImpl.Properties( ApplicationId.defaultId(), configserverConfig, Zone.defaultZone(), endpoints, false, false, flagSource, null, Optional.empty(), Optional.empty(), Optional.empty()), Optional.empty(), Optional.empty(), new Version(7), new Version(8)); assertTrue(context.applicationPackage() instanceof MockApplicationPackage); assertFalse(context.hostProvisioner().isPresent()); assertFalse(context.permanentApplicationPackage().isPresent()); assertFalse(context.previousModel().isPresent()); assertTrue(context.getFileRegistry() instanceof MockFileRegistry); assertTrue(context.configDefinitionRepo() instanceof StaticConfigDefinitionRepo); assertThat(context.properties().applicationId(), is(ApplicationId.defaultId())); assertTrue(context.properties().configServerSpecs().isEmpty()); assertTrue(context.properties().multitenant()); assertNotNull(context.properties().zone()); assertFalse(context.properties().hostedVespa()); assertThat(context.properties().endpoints(), equalTo(endpoints)); assertThat(context.properties().isFirstTimeDeployment(), equalTo(false)); assertEquals(Optional.empty(), context.wantedDockerImageRepo()); assertEquals(new Version(7), context.modelVespaVersion()); assertEquals(new Version(8), context.wantedNodeVespaVersion()); assertEquals(1.0, context.properties().featureFlags().defaultTermwiseLimit(), 0.0); 
assertFalse(context.properties().featureFlags().useAsyncMessageHandlingOnSchedule()); assertEquals(0, context.properties().featureFlags().contentNodeBucketDBStripeBits()); assertEquals(0x400000, context.properties().featureFlags().mergeChunkSize()); assertEquals(0.5, context.properties().featureFlags().feedConcurrency(), 0.0); }
class ModelContextImplTest { @Test }
class ModelContextImplTest { @Test }
A bit weird to have a NPE as part of the expected behaviour
void testParsing() { DocumentmanagerConfig musicConfig = Deriver.getDocumentManagerConfig("src/test/resources/schemas/music.sd").build(); DocumentTypeManager manager = new DocumentTypeManager(musicConfig); assertEquals(Map.of(manager.getDocumentType("music"), Instant.ofEpochMilli(123)), parseReady(new ReindexingConfig.Builder() .enabled(true) .clusterName("cluster") .status("music", new ReindexingConfig.Status.Builder().readyAtMillis(123)) .build(), manager)); assertThrows(NullPointerException.class, () -> parseReady(new ReindexingConfig.Builder() .clusterName("cluster") .status("poetry", new ReindexingConfig.Status.Builder().readyAtMillis(123)) .build(), manager)); assertEquals(new Cluster("cluster", "configId", Map.of(manager.getDocumentType("music"), "default")), parseCluster("cluster", new ClusterListConfig.Builder() .storage(new ClusterListConfig.Storage.Builder() .name("oyster") .configid("configId")) .storage(new ClusterListConfig.Storage.Builder() .name("cluster") .configid("configId")) .build(), new AllClustersBucketSpacesConfig.Builder() .cluster("oyster", new AllClustersBucketSpacesConfig.Cluster.Builder() .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder() .bucketSpace("global"))) .cluster("cluster", new AllClustersBucketSpacesConfig.Cluster.Builder() .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder() .bucketSpace("default"))) .build(), manager)); assertThrows(NullPointerException.class, () -> parseCluster("cluster", new ClusterListConfig.Builder() .storage(new ClusterListConfig.Storage.Builder() .name("cluster") .configid("configId")) .build(), new AllClustersBucketSpacesConfig.Builder() .cluster("oyster", new AllClustersBucketSpacesConfig.Cluster.Builder() .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder() .bucketSpace("global"))) .build(), manager)); assertThrows(IllegalStateException.class, () -> parseCluster("cluster", new 
ClusterListConfig.Builder() .storage(new ClusterListConfig.Storage.Builder() .name("oyster") .configid("configId")) .build(), new AllClustersBucketSpacesConfig.Builder() .cluster("cluster", new AllClustersBucketSpacesConfig.Cluster.Builder() .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder() .bucketSpace("default"))) .build(), manager)); }
assertThrows(NullPointerException.class,
void testParsing() { DocumentmanagerConfig musicConfig = Deriver.getDocumentManagerConfig("src/test/resources/schemas/music.sd").build(); DocumentTypeManager manager = new DocumentTypeManager(musicConfig); assertEquals(Map.of(manager.getDocumentType("music"), Instant.ofEpochMilli(123)), parseReady(new ReindexingConfig.Clusters.Builder() .documentTypes("music", new ReindexingConfig.Clusters.DocumentTypes.Builder().readyAtMillis(123)) .build(), manager)); assertThrows(NullPointerException.class, () -> parseReady(new ReindexingConfig.Clusters.Builder() .documentTypes("poetry", new ReindexingConfig.Clusters.DocumentTypes.Builder().readyAtMillis(123)) .build(), manager)); assertEquals(new Cluster("cluster", "configId", Map.of(manager.getDocumentType("music"), "default")), parseCluster("cluster", new ClusterListConfig.Builder() .storage(new ClusterListConfig.Storage.Builder() .name("oyster") .configid("configId")) .storage(new ClusterListConfig.Storage.Builder() .name("cluster") .configid("configId")) .build(), new AllClustersBucketSpacesConfig.Builder() .cluster("oyster", new AllClustersBucketSpacesConfig.Cluster.Builder() .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder() .bucketSpace("global"))) .cluster("cluster", new AllClustersBucketSpacesConfig.Cluster.Builder() .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder() .bucketSpace("default"))) .build(), manager)); assertThrows(NullPointerException.class, () -> parseCluster("cluster", new ClusterListConfig.Builder() .storage(new ClusterListConfig.Storage.Builder() .name("cluster") .configid("configId")) .build(), new AllClustersBucketSpacesConfig.Builder() .cluster("oyster", new AllClustersBucketSpacesConfig.Cluster.Builder() .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder() .bucketSpace("global"))) .build(), manager)); assertThrows(IllegalStateException.class, () -> parseCluster("cluster", new 
ClusterListConfig.Builder() .storage(new ClusterListConfig.Storage.Builder() .name("oyster") .configid("configId")) .build(), new AllClustersBucketSpacesConfig.Builder() .cluster("cluster", new AllClustersBucketSpacesConfig.Cluster.Builder() .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder() .bucketSpace("default"))) .build(), manager)); }
class ReindexingMaintainerTest { @Test @Test void testStaggering() { scheduleStaggered((delayMillis, intervalMillis) -> { assertEquals(0, delayMillis); assertEquals(10, intervalMillis); }, Duration.ofMillis(10), Instant.ofEpochMilli(27), "host", "nys:123,hark:123"); scheduleStaggered((delayMillis, intervalMillis) -> { assertEquals(3, delayMillis); assertEquals(10, intervalMillis); }, Duration.ofMillis(10), Instant.ofEpochMilli(27), "host", "host:123"); scheduleStaggered((delayMillis, intervalMillis) -> { assertEquals(7, delayMillis); assertEquals(20, intervalMillis); }, Duration.ofMillis(10), Instant.ofEpochMilli(13), "host", "host:123,:nys:321"); scheduleStaggered((delayMillis, intervalMillis) -> { assertEquals(17, delayMillis); assertEquals(20, intervalMillis); }, Duration.ofMillis(10), Instant.ofEpochMilli(13), "nys", "host:123,nys:321"); } }
class ReindexingMaintainerTest {

    /**
     * Verifies that reindexing maintenance runs are staggered across config servers:
     * for a base interval of 10 ms, each host in the host list gets its own delay slot,
     * and the effective interval grows with the number of listed hosts (10 ms for one
     * entry, 20 ms for two entries in the cases below).
     */
    @Test // NOTE(review): the original declared @Test twice; @Test is not a repeatable annotation, so the duplicate would not compile and has been removed.
    void testStaggering() {
        // Host not present in the host list: no delay, base interval.
        scheduleStaggered((delayMillis, intervalMillis) -> {
            assertEquals(0, delayMillis);
            assertEquals(10, intervalMillis);
        }, Duration.ofMillis(10), Instant.ofEpochMilli(27), "host", "nys:123,hark:123");

        // Single-entry list containing this host: delayed to the next interval boundary.
        scheduleStaggered((delayMillis, intervalMillis) -> {
            assertEquals(3, delayMillis);
            assertEquals(10, intervalMillis);
        }, Duration.ofMillis(10), Instant.ofEpochMilli(27), "host", "host:123");

        // Two entries: interval doubles; this host gets the earlier slot.
        // NOTE(review): the stray ':' in ":nys:321" may be a typo in the test data or a
        // deliberate malformed-entry case — confirm the intent with the original author.
        scheduleStaggered((delayMillis, intervalMillis) -> {
            assertEquals(7, delayMillis);
            assertEquals(20, intervalMillis);
        }, Duration.ofMillis(10), Instant.ofEpochMilli(13), "host", "host:123,:nys:321");

        // Two hosts: the second host ("nys") is offset by one base interval (7 + 10 = 17).
        scheduleStaggered((delayMillis, intervalMillis) -> {
            assertEquals(17, delayMillis);
            assertEquals(20, intervalMillis);
        }, Duration.ofMillis(10), Instant.ofEpochMilli(13), "nys", "host:123,nys:321");
    }

}
Consider replacing SimpleZone with a helper method that returns a mock(ZoneApi.class) with `when()` stubs defined for getSystemName() and getId()?
// Builds a Mockito-backed ZoneRegistry for tests:
//  - system() -> SystemName.main
//  - getConfigServerVipUri(...) and getConfigServerHttpsIdentity(...) stubbed with fixed values
//  - zones() -> a ZoneList mock whose reachable() returns itself and whose zones()
//    returns two SimpleZone fixtures (prod.us-west-1, prod.us-east-3).
// NOTE(review): the raw (List) cast on the zones() stub bypasses generics checking;
// consider doReturn(List.of(...)).when(zoneListMock).zones() to avoid the unchecked cast.
// NOTE(review): the "http: literal below appears truncated (likely URL stripped during
// text extraction) — verify against the original source before reusing this text.
private static ZoneRegistry createZoneRegistryMock() { ZoneRegistry registryMock = mock(ZoneRegistry.class); when(registryMock.system()).thenReturn(SystemName.main); when(registryMock.getConfigServerVipUri(any())).thenReturn(URI.create("http: when(registryMock.getConfigServerHttpsIdentity(any())).thenReturn(new AthenzService("domain", "servicename")); ZoneList zoneListMock = mock(ZoneList.class); when(zoneListMock.reachable()).thenReturn(zoneListMock); when(zoneListMock.zones()).thenReturn((List)List.of(new SimpleZone("prod.us-west-1"), new SimpleZone("prod.us-east-3"))); when(registryMock.zones()).thenReturn(zoneListMock); return registryMock; }
// Stubs zones() with two SimpleZone fixtures. The raw (List) cast works around a
// generic-wildcard mismatch with thenReturn but is unchecked — NOTE(review): prefer
// doReturn(List.of(...)).when(zoneListMock).zones() to avoid the cast.
when(zoneListMock.zones()).thenReturn((List)List.of(new SimpleZone("prod.us-west-1"), new SimpleZone("prod.us-east-3")));
// Builds a Mockito-backed ZoneRegistry for tests:
//  - system() -> SystemName.main
//  - getConfigServerVipUri(...) and getConfigServerHttpsIdentity(...) stubbed with fixed values
//  - zones() -> a ZoneList mock whose reachable() returns itself and whose zones()
//    returns two SimpleZone fixtures (prod.us-west-1, prod.us-east-3).
// NOTE(review): the raw (List) cast on the zones() stub bypasses generics checking;
// consider doReturn(List.of(...)).when(zoneListMock).zones() to avoid the unchecked cast.
// NOTE(review): the "http: literal below appears truncated (likely URL stripped during
// text extraction) — verify against the original source before reusing this text.
private static ZoneRegistry createZoneRegistryMock() { ZoneRegistry registryMock = mock(ZoneRegistry.class); when(registryMock.system()).thenReturn(SystemName.main); when(registryMock.getConfigServerVipUri(any())).thenReturn(URI.create("http: when(registryMock.getConfigServerHttpsIdentity(any())).thenReturn(new AthenzService("domain", "servicename")); ZoneList zoneListMock = mock(ZoneList.class); when(zoneListMock.reachable()).thenReturn(zoneListMock); when(zoneListMock.zones()).thenReturn((List)List.of(new SimpleZone("prod.us-west-1"), new SimpleZone("prod.us-east-3"))); when(registryMock.zones()).thenReturn(zoneListMock); return registryMock; }
// Unit tests for SystemFlagsDataArchive: zip round-trip serialization, per-target flag
// data resolution, multi-level flag directories, detection of duplicate/unknown/non-JSON
// flag files and unknown JSON fields, comment stripping, and normalizeJson validation
// failures for invalid application ids, node types, email addresses and tenant ids.
// SimpleZone (at the bottom) is a minimal ZoneApi fixture: fixed SystemName.main, a
// configurable ZoneId, and cloud accessors that throw UnsupportedOperationException.
// NOTE(review): several string literals below end abruptly at "https:" / "http:" /
// "See https:" — this looks like extraction garbling of embedded URLs; verify this text
// against the original source file before relying on it.
class SystemFlagsDataArchiveTest { private static final SystemName SYSTEM = SystemName.main; private static final FlagId MY_TEST_FLAG = new FlagId("my-test-flag"); private static final FlagId FLAG_WITH_EMPTY_DATA = new FlagId("flag-with-empty-data"); @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule public final ExpectedException expectedException = ExpectedException.none(); private static final FlagsTarget mainControllerTarget = FlagsTarget.forController(SYSTEM); private static final FlagsTarget cdControllerTarget = FlagsTarget.forController(SystemName.cd); private static final FlagsTarget prodUsWestCfgTarget = createConfigserverTarget(Environment.prod, "us-west-1"); private static final FlagsTarget prodUsEast3CfgTarget = createConfigserverTarget(Environment.prod, "us-east-3"); private static final FlagsTarget devUsEast1CfgTarget = createConfigserverTarget(Environment.dev, "us-east-1"); private static FlagsTarget createConfigserverTarget(Environment environment, String region) { return new ConfigServerFlagsTarget( SYSTEM, ZoneId.from(environment, RegionName.from(region)), URI.create("https: new AthenzService("vespa.cfg-" + region)); } @Test public void can_serialize_and_deserialize_archive() throws IOException { File tempFile = temporaryFolder.newFile("serialized-flags-archive"); try (OutputStream out = new BufferedOutputStream(new FileOutputStream(tempFile))) { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); archive.toZip(out); } try (InputStream in = new BufferedInputStream(new FileInputStream(tempFile))) { SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromZip(in); assertArchiveReturnsCorrectTestFlagDataForTarget(archive); } } @Test public void retrieves_correct_flag_data_for_target() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); assertArchiveReturnsCorrectTestFlagDataForTarget(archive); } @Test public void 
supports_multi_level_flags_directory() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level/")); assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default"); } @Test public void duplicated_flagdata_is_detected() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("contains redundant flag data for id 'my-test-flag' already set in another directory!"); var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level-with-duplicated-flagdata/")); } @Test public void empty_files_are_handled_as_no_flag_data_for_target() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, mainControllerTarget); assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, prodUsWestCfgTarget, "main.prod.us-west-1"); assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, prodUsEast3CfgTarget); assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, devUsEast1CfgTarget, "main"); } @Test public void throws_exception_on_non_json_file() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Only JSON files are allowed in 'flags/' directory (found 'flags/my-test-flag/file-name-without-dot-json')"); SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-invalid-file-name/")); } @Test public void throws_exception_on_unknown_file() { SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-file-name/")); expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Unknown flag file: flags/my-test-flag/main.prod.unknown-region.json"); archive.validateAllFilesAreForTargets(SystemName.main, Set.of(mainControllerTarget, prodUsWestCfgTarget)); } @Test public void throws_on_unknown_field() { 
expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage( "flags/my-test-flag/main.prod.us-west-1.json contains unknown non-comment fields: after removing any comment fields the JSON is:\n" + " {\"id\":\"my-test-flag\",\"rules\":[{\"condition\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"foo.com\"]}],\"value\":\"default\"}]}\n" + "but deserializing this ended up with a JSON that are missing some of the fields:\n" + " {\"id\":\"my-test-flag\",\"rules\":[{\"value\":\"default\"}]}\n" + "See https: SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-field-name/")); } @Test public void remove_comments() { assertTrue(JSON.equals("{\n" + " \"a\": {\n" + " \"b\": 1\n" + " },\n" + " \"list\": [\n" + " {\n" + " \"c\": 2\n" + " },\n" + " {\n" + " }\n" + " ]\n" + "}", SystemFlagsDataArchive.normalizeJson("{\n" + " \"comment\": \"comment a\",\n" + " \"a\": {\n" + " \"comment\": \"comment b\",\n" + " \"b\": 1\n" + " },\n" + " \"list\": [\n" + " {\n" + " \"comment\": \"comment c\",\n" + " \"c\": 2\n" + " },\n" + " {\n" + " \"comment\": \"comment d\"\n" + " }\n" + " ]\n" + "}"))); } @Test public void normalize_json_fail_on_invalid_application() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"application\",\n" + " \"values\": [ \"a.b.c\" ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Application ids must be on the form tenant:application:instance, but was a.b.c", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_node_type() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"node-type\",\n" + " \"values\": [ \"footype\" ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("No enum constant com.yahoo.config.provision.NodeType.footype", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_email() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"console-user-email\",\n" + " \"values\": [ 123 ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Non-string email address: 123", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_tenant_id() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"tenant\",\n" + " \"values\": [ 123 ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Non-string tenant ID: 123", e.getMessage()); } } @Test public void ignores_files_not_related_to_specified_system_definition() { ZoneRegistry registry = createZoneRegistryMock(); Path testDirectory = Paths.get("src/test/resources/system-flags-for-multiple-systems/"); var archive = SystemFlagsDataArchive.fromDirectoryAndSystem(testDirectory, registry); assertFlagDataHasValue(archive, MY_TEST_FLAG, cdControllerTarget, "default"); assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default"); assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1"); } @SuppressWarnings("unchecked") private static void assertArchiveReturnsCorrectTestFlagDataForTarget(SystemFlagsDataArchive archive) { assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "main.controller"); 
assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1"); assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsEast3CfgTarget, "main.prod"); assertFlagDataHasValue(archive, MY_TEST_FLAG, devUsEast1CfgTarget, "main"); } private static void assertFlagDataHasValue(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target, String value) { List<FlagData> data = getData(archive, flagId, target); assertEquals(1, data.size()); FlagData flagData = data.get(0); RawFlag rawFlag = flagData.resolve(FetchVector.fromMap(Map.of())).get(); assertEquals(String.format("\"%s\"", value), rawFlag.asJson()); } private static void assertNoFlagData(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target) { List<FlagData> data = getData(archive, flagId, target); assertTrue(data.isEmpty()); } private static List<FlagData> getData(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target) { return archive.flagData(target).stream() .filter(d -> d.id().equals(flagId)) .collect(toList()); } private static class SimpleZone implements ZoneApi { final ZoneId zoneId; SimpleZone(String zoneId) { this.zoneId = ZoneId.from(zoneId); } @Override public SystemName getSystemName() { return SystemName.main; } @Override public ZoneId getId() { return zoneId; } @Override public CloudName getCloudName() { throw new UnsupportedOperationException(); } @Override public String getCloudNativeRegionName() { throw new UnsupportedOperationException(); } } }
// Unit tests for SystemFlagsDataArchive: zip round-trip serialization, per-target flag
// data resolution, multi-level flag directories, detection of duplicate/unknown/non-JSON
// flag files and unknown JSON fields, comment stripping, and normalizeJson validation
// failures for invalid application ids, node types, email addresses and tenant ids.
// SimpleZone (at the bottom) is a minimal ZoneApi fixture: fixed SystemName.main, a
// configurable ZoneId, and cloud accessors that throw UnsupportedOperationException.
// NOTE(review): several string literals below end abruptly at "https:" / "http:" /
// "See https:" — this looks like extraction garbling of embedded URLs; verify this text
// against the original source file before relying on it.
class SystemFlagsDataArchiveTest { private static final SystemName SYSTEM = SystemName.main; private static final FlagId MY_TEST_FLAG = new FlagId("my-test-flag"); private static final FlagId FLAG_WITH_EMPTY_DATA = new FlagId("flag-with-empty-data"); @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule public final ExpectedException expectedException = ExpectedException.none(); private static final FlagsTarget mainControllerTarget = FlagsTarget.forController(SYSTEM); private static final FlagsTarget cdControllerTarget = FlagsTarget.forController(SystemName.cd); private static final FlagsTarget prodUsWestCfgTarget = createConfigserverTarget(Environment.prod, "us-west-1"); private static final FlagsTarget prodUsEast3CfgTarget = createConfigserverTarget(Environment.prod, "us-east-3"); private static final FlagsTarget devUsEast1CfgTarget = createConfigserverTarget(Environment.dev, "us-east-1"); private static FlagsTarget createConfigserverTarget(Environment environment, String region) { return new ConfigServerFlagsTarget( SYSTEM, ZoneId.from(environment, RegionName.from(region)), URI.create("https: new AthenzService("vespa.cfg-" + region)); } @Test public void can_serialize_and_deserialize_archive() throws IOException { File tempFile = temporaryFolder.newFile("serialized-flags-archive"); try (OutputStream out = new BufferedOutputStream(new FileOutputStream(tempFile))) { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); archive.toZip(out); } try (InputStream in = new BufferedInputStream(new FileInputStream(tempFile))) { SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromZip(in); assertArchiveReturnsCorrectTestFlagDataForTarget(archive); } } @Test public void retrieves_correct_flag_data_for_target() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); assertArchiveReturnsCorrectTestFlagDataForTarget(archive); } @Test public void 
supports_multi_level_flags_directory() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level/")); assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default"); } @Test public void duplicated_flagdata_is_detected() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("contains redundant flag data for id 'my-test-flag' already set in another directory!"); var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level-with-duplicated-flagdata/")); } @Test public void empty_files_are_handled_as_no_flag_data_for_target() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, mainControllerTarget); assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, prodUsWestCfgTarget, "main.prod.us-west-1"); assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, prodUsEast3CfgTarget); assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, devUsEast1CfgTarget, "main"); } @Test public void throws_exception_on_non_json_file() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Only JSON files are allowed in 'flags/' directory (found 'flags/my-test-flag/file-name-without-dot-json')"); SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-invalid-file-name/")); } @Test public void throws_exception_on_unknown_file() { SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-file-name/")); expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Unknown flag file: flags/my-test-flag/main.prod.unknown-region.json"); archive.validateAllFilesAreForTargets(SystemName.main, Set.of(mainControllerTarget, prodUsWestCfgTarget)); } @Test public void throws_on_unknown_field() { 
expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage( "flags/my-test-flag/main.prod.us-west-1.json contains unknown non-comment fields: after removing any comment fields the JSON is:\n" + " {\"id\":\"my-test-flag\",\"rules\":[{\"condition\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"foo.com\"]}],\"value\":\"default\"}]}\n" + "but deserializing this ended up with a JSON that are missing some of the fields:\n" + " {\"id\":\"my-test-flag\",\"rules\":[{\"value\":\"default\"}]}\n" + "See https: SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-field-name/")); } @Test public void remove_comments() { assertTrue(JSON.equals("{\n" + " \"a\": {\n" + " \"b\": 1\n" + " },\n" + " \"list\": [\n" + " {\n" + " \"c\": 2\n" + " },\n" + " {\n" + " }\n" + " ]\n" + "}", SystemFlagsDataArchive.normalizeJson("{\n" + " \"comment\": \"comment a\",\n" + " \"a\": {\n" + " \"comment\": \"comment b\",\n" + " \"b\": 1\n" + " },\n" + " \"list\": [\n" + " {\n" + " \"comment\": \"comment c\",\n" + " \"c\": 2\n" + " },\n" + " {\n" + " \"comment\": \"comment d\"\n" + " }\n" + " ]\n" + "}"))); } @Test public void normalize_json_fail_on_invalid_application() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"application\",\n" + " \"values\": [ \"a.b.c\" ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Application ids must be on the form tenant:application:instance, but was a.b.c", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_node_type() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"node-type\",\n" + " \"values\": [ \"footype\" ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("No enum constant com.yahoo.config.provision.NodeType.footype", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_email() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"console-user-email\",\n" + " \"values\": [ 123 ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Non-string email address: 123", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_tenant_id() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"tenant\",\n" + " \"values\": [ 123 ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Non-string tenant ID: 123", e.getMessage()); } } @Test public void ignores_files_not_related_to_specified_system_definition() { ZoneRegistry registry = createZoneRegistryMock(); Path testDirectory = Paths.get("src/test/resources/system-flags-for-multiple-systems/"); var archive = SystemFlagsDataArchive.fromDirectoryAndSystem(testDirectory, registry); assertFlagDataHasValue(archive, MY_TEST_FLAG, cdControllerTarget, "default"); assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default"); assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1"); } @SuppressWarnings("unchecked") private static void assertArchiveReturnsCorrectTestFlagDataForTarget(SystemFlagsDataArchive archive) { assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "main.controller"); 
assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1"); assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsEast3CfgTarget, "main.prod"); assertFlagDataHasValue(archive, MY_TEST_FLAG, devUsEast1CfgTarget, "main"); } private static void assertFlagDataHasValue(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target, String value) { List<FlagData> data = getData(archive, flagId, target); assertEquals(1, data.size()); FlagData flagData = data.get(0); RawFlag rawFlag = flagData.resolve(FetchVector.fromMap(Map.of())).get(); assertEquals(String.format("\"%s\"", value), rawFlag.asJson()); } private static void assertNoFlagData(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target) { List<FlagData> data = getData(archive, flagId, target); assertTrue(data.isEmpty()); } private static List<FlagData> getData(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target) { return archive.flagData(target).stream() .filter(d -> d.id().equals(flagId)) .collect(toList()); } private static class SimpleZone implements ZoneApi { final ZoneId zoneId; SimpleZone(String zoneId) { this.zoneId = ZoneId.from(zoneId); } @Override public SystemName getSystemName() { return SystemName.main; } @Override public ZoneId getId() { return zoneId; } @Override public CloudName getCloudName() { throw new UnsupportedOperationException(); } @Override public String getCloudNativeRegionName() { throw new UnsupportedOperationException(); } } }
I don't have a strong opinion about this; using Mockito for all mocks looks more consistent. As Mockito is somewhat controversial among Vespa devs, I try not to use it unless it's the superior choice.
// Builds a Mockito-backed ZoneRegistry for tests:
//  - system() -> SystemName.main
//  - getConfigServerVipUri(...) and getConfigServerHttpsIdentity(...) stubbed with fixed values
//  - zones() -> a ZoneList mock whose reachable() returns itself and whose zones()
//    returns two SimpleZone fixtures (prod.us-west-1, prod.us-east-3).
// NOTE(review): the raw (List) cast on the zones() stub bypasses generics checking;
// consider doReturn(List.of(...)).when(zoneListMock).zones() to avoid the unchecked cast.
// NOTE(review): the "http: literal below appears truncated (likely URL stripped during
// text extraction) — verify against the original source before reusing this text.
private static ZoneRegistry createZoneRegistryMock() { ZoneRegistry registryMock = mock(ZoneRegistry.class); when(registryMock.system()).thenReturn(SystemName.main); when(registryMock.getConfigServerVipUri(any())).thenReturn(URI.create("http: when(registryMock.getConfigServerHttpsIdentity(any())).thenReturn(new AthenzService("domain", "servicename")); ZoneList zoneListMock = mock(ZoneList.class); when(zoneListMock.reachable()).thenReturn(zoneListMock); when(zoneListMock.zones()).thenReturn((List)List.of(new SimpleZone("prod.us-west-1"), new SimpleZone("prod.us-east-3"))); when(registryMock.zones()).thenReturn(zoneListMock); return registryMock; }
// Stubs zones() with two SimpleZone fixtures. The raw (List) cast works around a
// generic-wildcard mismatch with thenReturn but is unchecked — NOTE(review): prefer
// doReturn(List.of(...)).when(zoneListMock).zones() to avoid the cast.
when(zoneListMock.zones()).thenReturn((List)List.of(new SimpleZone("prod.us-west-1"), new SimpleZone("prod.us-east-3")));
// Builds a Mockito-backed ZoneRegistry for tests:
//  - system() -> SystemName.main
//  - getConfigServerVipUri(...) and getConfigServerHttpsIdentity(...) stubbed with fixed values
//  - zones() -> a ZoneList mock whose reachable() returns itself and whose zones()
//    returns two SimpleZone fixtures (prod.us-west-1, prod.us-east-3).
// NOTE(review): the raw (List) cast on the zones() stub bypasses generics checking;
// consider doReturn(List.of(...)).when(zoneListMock).zones() to avoid the unchecked cast.
// NOTE(review): the "http: literal below appears truncated (likely URL stripped during
// text extraction) — verify against the original source before reusing this text.
private static ZoneRegistry createZoneRegistryMock() { ZoneRegistry registryMock = mock(ZoneRegistry.class); when(registryMock.system()).thenReturn(SystemName.main); when(registryMock.getConfigServerVipUri(any())).thenReturn(URI.create("http: when(registryMock.getConfigServerHttpsIdentity(any())).thenReturn(new AthenzService("domain", "servicename")); ZoneList zoneListMock = mock(ZoneList.class); when(zoneListMock.reachable()).thenReturn(zoneListMock); when(zoneListMock.zones()).thenReturn((List)List.of(new SimpleZone("prod.us-west-1"), new SimpleZone("prod.us-east-3"))); when(registryMock.zones()).thenReturn(zoneListMock); return registryMock; }
class SystemFlagsDataArchiveTest { private static final SystemName SYSTEM = SystemName.main; private static final FlagId MY_TEST_FLAG = new FlagId("my-test-flag"); private static final FlagId FLAG_WITH_EMPTY_DATA = new FlagId("flag-with-empty-data"); @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule public final ExpectedException expectedException = ExpectedException.none(); private static final FlagsTarget mainControllerTarget = FlagsTarget.forController(SYSTEM); private static final FlagsTarget cdControllerTarget = FlagsTarget.forController(SystemName.cd); private static final FlagsTarget prodUsWestCfgTarget = createConfigserverTarget(Environment.prod, "us-west-1"); private static final FlagsTarget prodUsEast3CfgTarget = createConfigserverTarget(Environment.prod, "us-east-3"); private static final FlagsTarget devUsEast1CfgTarget = createConfigserverTarget(Environment.dev, "us-east-1"); private static FlagsTarget createConfigserverTarget(Environment environment, String region) { return new ConfigServerFlagsTarget( SYSTEM, ZoneId.from(environment, RegionName.from(region)), URI.create("https: new AthenzService("vespa.cfg-" + region)); } @Test public void can_serialize_and_deserialize_archive() throws IOException { File tempFile = temporaryFolder.newFile("serialized-flags-archive"); try (OutputStream out = new BufferedOutputStream(new FileOutputStream(tempFile))) { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); archive.toZip(out); } try (InputStream in = new BufferedInputStream(new FileInputStream(tempFile))) { SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromZip(in); assertArchiveReturnsCorrectTestFlagDataForTarget(archive); } } @Test public void retrieves_correct_flag_data_for_target() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); assertArchiveReturnsCorrectTestFlagDataForTarget(archive); } @Test public void 
supports_multi_level_flags_directory() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level/")); assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default"); } @Test public void duplicated_flagdata_is_detected() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("contains redundant flag data for id 'my-test-flag' already set in another directory!"); var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level-with-duplicated-flagdata/")); } @Test public void empty_files_are_handled_as_no_flag_data_for_target() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, mainControllerTarget); assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, prodUsWestCfgTarget, "main.prod.us-west-1"); assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, prodUsEast3CfgTarget); assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, devUsEast1CfgTarget, "main"); } @Test public void throws_exception_on_non_json_file() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Only JSON files are allowed in 'flags/' directory (found 'flags/my-test-flag/file-name-without-dot-json')"); SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-invalid-file-name/")); } @Test public void throws_exception_on_unknown_file() { SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-file-name/")); expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Unknown flag file: flags/my-test-flag/main.prod.unknown-region.json"); archive.validateAllFilesAreForTargets(SystemName.main, Set.of(mainControllerTarget, prodUsWestCfgTarget)); } @Test public void throws_on_unknown_field() { 
expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage( "flags/my-test-flag/main.prod.us-west-1.json contains unknown non-comment fields: after removing any comment fields the JSON is:\n" + " {\"id\":\"my-test-flag\",\"rules\":[{\"condition\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"foo.com\"]}],\"value\":\"default\"}]}\n" + "but deserializing this ended up with a JSON that are missing some of the fields:\n" + " {\"id\":\"my-test-flag\",\"rules\":[{\"value\":\"default\"}]}\n" + "See https: SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-field-name/")); } @Test public void remove_comments() { assertTrue(JSON.equals("{\n" + " \"a\": {\n" + " \"b\": 1\n" + " },\n" + " \"list\": [\n" + " {\n" + " \"c\": 2\n" + " },\n" + " {\n" + " }\n" + " ]\n" + "}", SystemFlagsDataArchive.normalizeJson("{\n" + " \"comment\": \"comment a\",\n" + " \"a\": {\n" + " \"comment\": \"comment b\",\n" + " \"b\": 1\n" + " },\n" + " \"list\": [\n" + " {\n" + " \"comment\": \"comment c\",\n" + " \"c\": 2\n" + " },\n" + " {\n" + " \"comment\": \"comment d\"\n" + " }\n" + " ]\n" + "}"))); } @Test public void normalize_json_fail_on_invalid_application() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"application\",\n" + " \"values\": [ \"a.b.c\" ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Application ids must be on the form tenant:application:instance, but was a.b.c", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_node_type() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"node-type\",\n" + " 
\"values\": [ \"footype\" ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("No enum constant com.yahoo.config.provision.NodeType.footype", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_email() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"console-user-email\",\n" + " \"values\": [ 123 ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Non-string email address: 123", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_tenant_id() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"tenant\",\n" + " \"values\": [ 123 ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Non-string tenant ID: 123", e.getMessage()); } } @Test public void ignores_files_not_related_to_specified_system_definition() { ZoneRegistry registry = createZoneRegistryMock(); Path testDirectory = Paths.get("src/test/resources/system-flags-for-multiple-systems/"); var archive = SystemFlagsDataArchive.fromDirectoryAndSystem(testDirectory, registry); assertFlagDataHasValue(archive, MY_TEST_FLAG, cdControllerTarget, "default"); assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default"); assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1"); } @SuppressWarnings("unchecked") private static void assertArchiveReturnsCorrectTestFlagDataForTarget(SystemFlagsDataArchive archive) { assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "main.controller"); 
assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1"); assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsEast3CfgTarget, "main.prod"); assertFlagDataHasValue(archive, MY_TEST_FLAG, devUsEast1CfgTarget, "main"); } private static void assertFlagDataHasValue(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target, String value) { List<FlagData> data = getData(archive, flagId, target); assertEquals(1, data.size()); FlagData flagData = data.get(0); RawFlag rawFlag = flagData.resolve(FetchVector.fromMap(Map.of())).get(); assertEquals(String.format("\"%s\"", value), rawFlag.asJson()); } private static void assertNoFlagData(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target) { List<FlagData> data = getData(archive, flagId, target); assertTrue(data.isEmpty()); } private static List<FlagData> getData(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target) { return archive.flagData(target).stream() .filter(d -> d.id().equals(flagId)) .collect(toList()); } private static class SimpleZone implements ZoneApi { final ZoneId zoneId; SimpleZone(String zoneId) { this.zoneId = ZoneId.from(zoneId); } @Override public SystemName getSystemName() { return SystemName.main; } @Override public ZoneId getId() { return zoneId; } @Override public CloudName getCloudName() { throw new UnsupportedOperationException(); } @Override public String getCloudNativeRegionName() { throw new UnsupportedOperationException(); } } }
class SystemFlagsDataArchiveTest { private static final SystemName SYSTEM = SystemName.main; private static final FlagId MY_TEST_FLAG = new FlagId("my-test-flag"); private static final FlagId FLAG_WITH_EMPTY_DATA = new FlagId("flag-with-empty-data"); @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule public final ExpectedException expectedException = ExpectedException.none(); private static final FlagsTarget mainControllerTarget = FlagsTarget.forController(SYSTEM); private static final FlagsTarget cdControllerTarget = FlagsTarget.forController(SystemName.cd); private static final FlagsTarget prodUsWestCfgTarget = createConfigserverTarget(Environment.prod, "us-west-1"); private static final FlagsTarget prodUsEast3CfgTarget = createConfigserverTarget(Environment.prod, "us-east-3"); private static final FlagsTarget devUsEast1CfgTarget = createConfigserverTarget(Environment.dev, "us-east-1"); private static FlagsTarget createConfigserverTarget(Environment environment, String region) { return new ConfigServerFlagsTarget( SYSTEM, ZoneId.from(environment, RegionName.from(region)), URI.create("https: new AthenzService("vespa.cfg-" + region)); } @Test public void can_serialize_and_deserialize_archive() throws IOException { File tempFile = temporaryFolder.newFile("serialized-flags-archive"); try (OutputStream out = new BufferedOutputStream(new FileOutputStream(tempFile))) { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); archive.toZip(out); } try (InputStream in = new BufferedInputStream(new FileInputStream(tempFile))) { SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromZip(in); assertArchiveReturnsCorrectTestFlagDataForTarget(archive); } } @Test public void retrieves_correct_flag_data_for_target() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); assertArchiveReturnsCorrectTestFlagDataForTarget(archive); } @Test public void 
supports_multi_level_flags_directory() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level/")); assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default"); } @Test public void duplicated_flagdata_is_detected() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("contains redundant flag data for id 'my-test-flag' already set in another directory!"); var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level-with-duplicated-flagdata/")); } @Test public void empty_files_are_handled_as_no_flag_data_for_target() { var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/")); assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, mainControllerTarget); assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, prodUsWestCfgTarget, "main.prod.us-west-1"); assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, prodUsEast3CfgTarget); assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, devUsEast1CfgTarget, "main"); } @Test public void throws_exception_on_non_json_file() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Only JSON files are allowed in 'flags/' directory (found 'flags/my-test-flag/file-name-without-dot-json')"); SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-invalid-file-name/")); } @Test public void throws_exception_on_unknown_file() { SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-file-name/")); expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Unknown flag file: flags/my-test-flag/main.prod.unknown-region.json"); archive.validateAllFilesAreForTargets(SystemName.main, Set.of(mainControllerTarget, prodUsWestCfgTarget)); } @Test public void throws_on_unknown_field() { 
expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage( "flags/my-test-flag/main.prod.us-west-1.json contains unknown non-comment fields: after removing any comment fields the JSON is:\n" + " {\"id\":\"my-test-flag\",\"rules\":[{\"condition\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"foo.com\"]}],\"value\":\"default\"}]}\n" + "but deserializing this ended up with a JSON that are missing some of the fields:\n" + " {\"id\":\"my-test-flag\",\"rules\":[{\"value\":\"default\"}]}\n" + "See https: SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-field-name/")); } @Test public void remove_comments() { assertTrue(JSON.equals("{\n" + " \"a\": {\n" + " \"b\": 1\n" + " },\n" + " \"list\": [\n" + " {\n" + " \"c\": 2\n" + " },\n" + " {\n" + " }\n" + " ]\n" + "}", SystemFlagsDataArchive.normalizeJson("{\n" + " \"comment\": \"comment a\",\n" + " \"a\": {\n" + " \"comment\": \"comment b\",\n" + " \"b\": 1\n" + " },\n" + " \"list\": [\n" + " {\n" + " \"comment\": \"comment c\",\n" + " \"c\": 2\n" + " },\n" + " {\n" + " \"comment\": \"comment d\"\n" + " }\n" + " ]\n" + "}"))); } @Test public void normalize_json_fail_on_invalid_application() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"application\",\n" + " \"values\": [ \"a.b.c\" ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Application ids must be on the form tenant:application:instance, but was a.b.c", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_node_type() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"node-type\",\n" + " 
\"values\": [ \"footype\" ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("No enum constant com.yahoo.config.provision.NodeType.footype", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_email() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"console-user-email\",\n" + " \"values\": [ 123 ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Non-string email address: 123", e.getMessage()); } } @Test public void normalize_json_fail_on_invalid_tenant_id() { try { SystemFlagsDataArchive.normalizeJson("{\n" + " \"id\": \"foo\",\n" + " \"rules\": [\n" + " {\n" + " \"conditions\": [\n" + " {\n" + " \"type\": \"whitelist\",\n" + " \"dimension\": \"tenant\",\n" + " \"values\": [ 123 ]\n" + " }\n" + " ],\n" + " \"value\": true\n" + " }\n" + " ]\n" + "}\n"); fail(); } catch (IllegalArgumentException e) { assertEquals("Non-string tenant ID: 123", e.getMessage()); } } @Test public void ignores_files_not_related_to_specified_system_definition() { ZoneRegistry registry = createZoneRegistryMock(); Path testDirectory = Paths.get("src/test/resources/system-flags-for-multiple-systems/"); var archive = SystemFlagsDataArchive.fromDirectoryAndSystem(testDirectory, registry); assertFlagDataHasValue(archive, MY_TEST_FLAG, cdControllerTarget, "default"); assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default"); assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1"); } @SuppressWarnings("unchecked") private static void assertArchiveReturnsCorrectTestFlagDataForTarget(SystemFlagsDataArchive archive) { assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "main.controller"); 
assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1"); assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsEast3CfgTarget, "main.prod"); assertFlagDataHasValue(archive, MY_TEST_FLAG, devUsEast1CfgTarget, "main"); } private static void assertFlagDataHasValue(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target, String value) { List<FlagData> data = getData(archive, flagId, target); assertEquals(1, data.size()); FlagData flagData = data.get(0); RawFlag rawFlag = flagData.resolve(FetchVector.fromMap(Map.of())).get(); assertEquals(String.format("\"%s\"", value), rawFlag.asJson()); } private static void assertNoFlagData(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target) { List<FlagData> data = getData(archive, flagId, target); assertTrue(data.isEmpty()); } private static List<FlagData> getData(SystemFlagsDataArchive archive, FlagId flagId, FlagsTarget target) { return archive.flagData(target).stream() .filter(d -> d.id().equals(flagId)) .collect(toList()); } private static class SimpleZone implements ZoneApi { final ZoneId zoneId; SimpleZone(String zoneId) { this.zoneId = ZoneId.from(zoneId); } @Override public SystemName getSystemName() { return SystemName.main; } @Override public ZoneId getId() { return zoneId; } @Override public CloudName getCloudName() { throw new UnsupportedOperationException(); } @Override public String getCloudNativeRegionName() { throw new UnsupportedOperationException(); } } }
Should be closed. 🙂
public ControllerMaintenance(Controller controller, Metric metric) { Intervals intervals = new Intervals(controller.system()); deploymentExpirer = new DeploymentExpirer(controller, intervals.defaultInterval); deploymentIssueReporter = new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval); metricsReporter = new MetricsReporter(controller, metric); outstandingChangeDeployer = new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer); versionStatusUpdater = new VersionStatusUpdater(controller, intervals.versionStatusUpdater); upgrader = new Upgrader(controller, intervals.defaultInterval); readyJobsTrigger = new ReadyJobsTrigger(controller, intervals.readyJobsTrigger); deploymentMetricsMaintainer = new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer); applicationOwnershipConfirmer = new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues()); systemUpgrader = new SystemUpgrader(controller, intervals.systemUpgrader); jobRunner = new JobRunner(controller, intervals.jobRunner); osUpgraders = osUpgraders(controller, intervals.osUpgrader); osVersionStatusUpdater = new OsVersionStatusUpdater(controller, intervals.defaultInterval); contactInformationMaintainer = new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer); nameServiceDispatcher = new NameServiceDispatcher(controller, intervals.nameServiceDispatcher); costReportMaintainer = new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer()); resourceMeterMaintainer = new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService()); cloudEventReporter = new CloudEventReporter(controller, intervals.cloudEventReporter, metric); rotationStatusUpdater = new RotationStatusUpdater(controller, 
intervals.defaultInterval); resourceTagMaintainer = new ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger()); systemRoutingPolicyMaintainer = new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer); applicationMetaDataGarbageCollector = new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector); containerImageExpirer = new ContainerImageExpirer(controller, intervals.containerImageExpirer); hostSwitchUpdater = new HostSwitchUpdater(controller, intervals.hostSwitchUpdater); reindexingTriggerer = new ReindexingTriggerer(controller, intervals.reindexingTriggerer); }
reindexingTriggerer = new ReindexingTriggerer(controller, intervals.reindexingTriggerer);
public ControllerMaintenance(Controller controller, Metric metric) { Intervals intervals = new Intervals(controller.system()); upgrader = new Upgrader(controller, intervals.defaultInterval); maintainers.add(upgrader); maintainers.addAll(osUpgraders(controller, intervals.osUpgrader)); maintainers.add(new DeploymentExpirer(controller, intervals.defaultInterval)); maintainers.add(new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval)); maintainers.add(new MetricsReporter(controller, metric)); maintainers.add(new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer)); maintainers.add(new VersionStatusUpdater(controller, intervals.versionStatusUpdater)); maintainers.add(new ReadyJobsTrigger(controller, intervals.readyJobsTrigger)); maintainers.add(new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer)); maintainers.add(new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues())); maintainers.add(new SystemUpgrader(controller, intervals.systemUpgrader)); maintainers.add(new JobRunner(controller, intervals.jobRunner)); maintainers.add(new OsVersionStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer)); maintainers.add(new NameServiceDispatcher(controller, intervals.nameServiceDispatcher)); maintainers.add(new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer())); maintainers.add(new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService())); maintainers.add(new CloudEventReporter(controller, intervals.cloudEventReporter, metric)); maintainers.add(new RotationStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new 
ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger())); maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer)); maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector)); maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer)); maintainers.add(new HostSwitchUpdater(controller, intervals.hostSwitchUpdater)); maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer)); }
class ControllerMaintenance extends AbstractComponent { private final DeploymentExpirer deploymentExpirer; private final DeploymentIssueReporter deploymentIssueReporter; private final MetricsReporter metricsReporter; private final OutstandingChangeDeployer outstandingChangeDeployer; private final VersionStatusUpdater versionStatusUpdater; private final Upgrader upgrader; private final ReadyJobsTrigger readyJobsTrigger; private final DeploymentMetricsMaintainer deploymentMetricsMaintainer; private final ApplicationOwnershipConfirmer applicationOwnershipConfirmer; private final SystemUpgrader systemUpgrader; private final List<OsUpgrader> osUpgraders; private final OsVersionStatusUpdater osVersionStatusUpdater; private final JobRunner jobRunner; private final ContactInformationMaintainer contactInformationMaintainer; private final CostReportMaintainer costReportMaintainer; private final ResourceMeterMaintainer resourceMeterMaintainer; private final NameServiceDispatcher nameServiceDispatcher; private final CloudEventReporter cloudEventReporter; private final RotationStatusUpdater rotationStatusUpdater; private final ResourceTagMaintainer resourceTagMaintainer; private final SystemRoutingPolicyMaintainer systemRoutingPolicyMaintainer; private final ApplicationMetaDataGarbageCollector applicationMetaDataGarbageCollector; private final ContainerImageExpirer containerImageExpirer; private final HostSwitchUpdater hostSwitchUpdater; private final ReindexingTriggerer reindexingTriggerer; @Inject @SuppressWarnings("unused") public Upgrader upgrader() { return upgrader; } @Override public void deconstruct() { deploymentExpirer.close(); deploymentIssueReporter.close(); metricsReporter.close(); outstandingChangeDeployer.close(); versionStatusUpdater.close(); upgrader.close(); readyJobsTrigger.close(); deploymentMetricsMaintainer.close(); applicationOwnershipConfirmer.close(); systemUpgrader.close(); osUpgraders.forEach(ControllerMaintainer::close); 
osVersionStatusUpdater.close(); jobRunner.close(); contactInformationMaintainer.close(); costReportMaintainer.close(); resourceMeterMaintainer.close(); nameServiceDispatcher.close(); cloudEventReporter.close(); rotationStatusUpdater.close(); resourceTagMaintainer.close(); systemRoutingPolicyMaintainer.close(); applicationMetaDataGarbageCollector.close(); containerImageExpirer.close(); hostSwitchUpdater.close(); } /** Create one OS upgrader per cloud found in the zone registry of controller */ private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) { return controller.zoneRegistry().zones().controllerUpgraded().zones().stream() .map(ZoneApi::getCloudName) .distinct() .sorted() .map(cloud -> new OsUpgrader(controller, interval, cloud)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } private static class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; public Intervals(SystemName system) { this.system = 
Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); } private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } } }
class ControllerMaintenance extends AbstractComponent { private final Upgrader upgrader; private final List<Maintainer> maintainers = new CopyOnWriteArrayList<>(); @Inject @SuppressWarnings("unused") public Upgrader upgrader() { return upgrader; } @Override public void deconstruct() { maintainers.forEach(Maintainer::shutdown); maintainers.forEach(Maintainer::awaitShutdown); } /** Create one OS upgrader per cloud found in the zone registry of controller */ private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) { return controller.zoneRegistry().zones().controllerUpgraded().zones().stream() .map(ZoneApi::getCloudName) .distinct() .sorted() .map(cloud -> new OsUpgrader(controller, interval, cloud)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } private static class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = 
duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); } private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } } }
Not if the list is already unmodifiable, which is the most likely case. Then it will only return the argument.
public List<Integer> getOrderByIndexes() { return List.copyOf(orderByIdx); }
return List.copyOf(orderByIdx);
public List<Integer> getOrderByIndexes() { return List.copyOf(orderByIdx); }
class Group extends Identifiable { public static final int classId = registerClass(0x4000 + 90, Group.class); private static final ObjectPredicate REF_LOCATOR = new RefLocator(); private List<Integer> orderByIdx = List.of(); private List<ExpressionNode> orderByExp = List.of(); private List<AggregationResult> aggregationResults = List.of(); private List<Group> children = List.of(); private ResultNode id = null; private double rank; private int tag = -1; private SortType sortType = SortType.UNSORTED; private static <T> List<T> add(List<T> oldList, T obj) { if (oldList.isEmpty()) { return List.of(obj); } if (oldList.size() == 1) { return List.of(oldList.get(0), obj); } List<T> newList = (oldList instanceof ArrayList) ? oldList : new ArrayList<>(oldList); newList.add(obj); return newList; } private static <T> List<T> sort(List<T> list, Comparator<T> cmp) { if (list instanceof ArrayList) { list.sort(cmp); return list; } else { return list.stream().sorted(cmp).collect(Collectors.toList()); } } /** * This tells you if the children are ranked by the pure relevance or by a more complex expression. * That indicates if the rank score from the child can be used for ordering. * * @return true if it ranked by pure relevance. */ public boolean isRankedByRelevance() { return orderByIdx.isEmpty(); } /** * Merges the content of the given group <b>into</b> this. When this function returns, make sure to call * {@link * * @param firstLevel The first level to merge. * @param currentLevel The current level. * @param rhs The group to merge with. 
*/ public void merge(int firstLevel, int currentLevel, Group rhs) { if (rhs.rank > rank) { rank = rhs.rank; } if (currentLevel >= firstLevel) { for (int i = 0, len = aggregationResults.size(); i < len; ++i) { aggregationResults.get(i).merge(rhs.aggregationResults.get(i)); } } ArrayList<Group> merged = new ArrayList<>(); Iterator<Group> lhsChild = children.iterator(), rhsChild = rhs.children.iterator(); if (lhsChild.hasNext() && rhsChild.hasNext()) { Group lhsGroup = lhsChild.next(); Group rhsGroup = rhsChild.next(); while ((lhsGroup != null) && (rhsGroup != null)) { int cmp = lhsGroup.getId().compareTo(rhsGroup.getId()); if (cmp < 0) { merged.add(lhsGroup); lhsGroup = lhsChild.hasNext() ? lhsChild.next() : null; } else if (cmp > 0) { merged.add(rhsGroup); rhsGroup = rhsChild.hasNext() ? rhsChild.next() : null; } else { lhsGroup.merge(firstLevel, currentLevel + 1, rhsGroup); merged.add(lhsGroup); lhsGroup = lhsChild.hasNext() ? lhsChild.next() : null; rhsGroup = rhsChild.hasNext() ? rhsChild.next() : null; } } if (lhsGroup != null) { merged.add(lhsGroup); } if (rhsGroup != null) { merged.add(rhsGroup); } } while (lhsChild.hasNext()) { merged.add(lhsChild.next()); } while (rhsChild.hasNext()) { merged.add(rhsChild.next()); } children = merged; } private void executeOrderBy() { for (ExpressionNode node : orderByExp) { node.prepare(); node.execute(); } } /** * After merging, this method will prune all levels so that they do not exceed the configured maximum number of * groups per level. * * @param levels The specs of all grouping levels. * @param firstLevel The first level to merge. * @param currentLevel The current level. 
*/ public void postMerge(List<GroupingLevel> levels, int firstLevel, int currentLevel) { if (currentLevel >= firstLevel) { for (AggregationResult result : aggregationResults) { result.postMerge(); } for (ExpressionNode result : orderByExp) { result.execute(); } } if (currentLevel < levels.size()) { int maxGroups = (int)levels.get(currentLevel).getMaxGroups(); for (Group group : children) { group.executeOrderBy(); } if (maxGroups >= 0 && children.size() > maxGroups) { sortChildrenByRank(); children = children.subList(0, maxGroups); sortChildrenById(); } for (Group group : children) { group.postMerge(levels, firstLevel, currentLevel + 1); } } } /** Sorts the children by their id, if they are not sorted already. */ public void sortChildrenById() { if (sortType == SortType.BYID) { return; } children = sort(children, Group::compareId); sortType = SortType.BYID; } /** Sorts the children by their rank, if they are not sorted already. */ public void sortChildrenByRank() { if (sortType == SortType.BYRANK) { return; } children = sort(children, Group::compareRank); sortType = SortType.BYRANK; } /** * Returns the label to use for this group. See comment on {@link * on the rationale of this being a {@link ResultNode}. */ public ResultNode getId() { return id; } /** * Sets the label to use for this group. This is a {@link ResultNode} so that a group can be labeled with * whatever value the classifier expression returns. * * @param id the label to set * @return this, to allow chaining */ public Group setId(ResultNode id) { this.id = id; return this; } /** * Sets the relevancy to use for this group. * * @param rank The rank to set. * @return This, to allow chaining. */ public Group setRank(double rank) { this.rank = rank; return this; } /** Return the rank score of this group. */ public double getRank() { return rank; } /** * Adds a child group to this. * * @param child The group to add. * @return This, to allow chaining. 
*/ public Group addChild(Group child) { if (child == null) { throw new IllegalArgumentException("Child can not be null."); } children = add(children, child); return this; } /** Returns the list of child groups to this. */ public List<Group> getChildren() { return List.copyOf(children); } /** * Returns the tag of this group. This value is set per-level in the grouping request, and then becomes assigned * to each group of that level in the grouping result as they are copied from the prototype. */ public int getTag() { return tag; } /** * Assigns a tag to this group. * * @param tag the numerical tag to set * @return this, to allow chaining */ public Group setTag(int tag) { this.tag = tag; return this; } /** * Returns this group's aggregation results. * * @return the aggregation results */ public List<AggregationResult> getAggregationResults() { return List.copyOf(aggregationResults); } /** * Adds an aggregation result to this group. * * @param result the result to add * @return this, to allow chaining */ public Group addAggregationResult(AggregationResult result) { aggregationResults = add(aggregationResults, result); return this; } /** * Adds an order-by expression to this group. If the expression is an AggregationResult, it will be added to the * list of this group's AggregationResults, and a reference to that expression is added instead. If the * AggregationResult is already present, a reference to THAT result is created instead. * * @param exp the result to add * @param asc true to sort ascending, false to sort descending * @return this, to allow chaining */ public Group addOrderBy(ExpressionNode exp, boolean asc) { if (exp instanceof AggregationResult) { exp = new AggregationRefNode((AggregationResult)exp); } RefResolver refResolver = new RefResolver(aggregationResults); exp.select(REF_LOCATOR, refResolver); aggregationResults = refResolver.results; orderByExp = add(orderByExp, exp); orderByIdx = add(orderByIdx, (asc ? 
1 : -1) * orderByExp.size()); return this; } public List<ExpressionNode> getOrderByExpressions() { return List.copyOf(orderByExp); } private int compareId(Group rhs) { return getId().compareTo(rhs.getId()); } private int compareRank(Group rhs) { long diff = 0; for (int i = 0, m = orderByIdx.size(); (diff == 0) && (i < m); i++) { int rawIndex = orderByIdx.get(i); int index = ((rawIndex < 0) ? -rawIndex : rawIndex) - 1; diff = orderByExp.get(index).getResult().compareTo(rhs.orderByExp.get(index).getResult()); diff = diff * rawIndex; } if (diff < 0) { return -1; } if (diff > 0) { return 1; } return -Double.compare(rank, rhs.rank); } @Override protected int onGetClassId() { return classId; } @Override protected void onSerialize(Serializer buf) { super.onSerialize(buf); serializeOptional(buf, id); buf.putDouble(null, rank); int sz = orderByIdx.size(); buf.putInt(null, sz); for (Integer index : orderByIdx) { buf.putInt(null, index); } int numResults = aggregationResults.size(); buf.putInt(null, numResults); for (AggregationResult a : aggregationResults) { serializeOptional(buf, a); } int numExpressionResults = orderByExp.size(); buf.putInt(null, numExpressionResults); for (ExpressionNode e : orderByExp) { serializeOptional(buf, e); } int numGroups = children.size(); buf.putInt(null, numGroups); for (Group g : children) { g.serializeWithId(buf); } buf.putInt(null, tag); } @Override protected void onDeserialize(Deserializer buf) { super.onDeserialize(buf); id = (ResultNode)deserializeOptional(buf); rank = buf.getDouble(null); orderByIdx = List.of(); int orderByCount = buf.getInt(null); if (orderByCount > 0) { Integer [] idxes = new Integer[orderByCount]; for (int i = 0; i < orderByCount; i++) { idxes[i] = buf.getInt(null); } orderByIdx = List.of(idxes); } int numResults = buf.getInt(null); if (numResults > 0) { AggregationResult [] results = new AggregationResult[numResults]; for (int i = 0; i < numResults; i++) { results[i] = (AggregationResult) deserializeOptional(buf); 
} aggregationResults = List.of(results); } else { aggregationResults = List.of(); } int numExpressionResults = buf.getInt(null); if (numExpressionResults > 0) { RefResolver resolver = new RefResolver(aggregationResults); ExpressionNode[] orderBy = new ExpressionNode[numExpressionResults]; for (int i = 0; i < numExpressionResults; i++) { ExpressionNode exp = (ExpressionNode) deserializeOptional(buf); exp.select(REF_LOCATOR, resolver); orderBy[i] = exp; } aggregationResults = resolver.results; orderByExp = List.of(orderBy); } else { orderByExp = List.of(); } int numGroups = buf.getInt(null); if (numGroups > 0) { Group [] groups = new Group[numGroups]; for (int i = 0; i < numGroups; i++) { Group g = new Group(); g.deserializeWithId(buf); groups[i] = g; } children = List.of(groups); } else { children = List.of(); } tag = buf.getInt(null); } @Override public int hashCode() { return super.hashCode() + aggregationResults.hashCode() + children.hashCode(); } @Override public boolean equals(Object obj) { if (obj == this) return true; if (!super.equals(obj)) return false; Group rhs = (Group)obj; if (!equals(id, rhs.id)) return false; if (rank != rhs.rank) return false; if (!aggregationResults.equals(rhs.aggregationResults)) return false; if (!orderByIdx.equals(rhs.orderByIdx)) return false; if (!orderByExp.equals(rhs.orderByExp)) return false; if (!children.equals(rhs.children)) return false; return true; } @Override public Group clone() { Group obj = (Group)super.clone(); if (id != null) { obj.id = (ResultNode)id.clone(); } if ( ! aggregationResults.isEmpty() ) { AggregationResult [] results = new AggregationResult[aggregationResults.size()]; int i = 0; for (AggregationResult result : aggregationResults) { results[i++] = result.clone(); } obj.aggregationResults = List.of(results); } obj.orderByIdx = List.copyOf(orderByIdx); if ( ! 
orderByExp.isEmpty()) { obj.orderByExp = new ArrayList<>(); RefResolver resolver = new RefResolver(obj.aggregationResults); ExpressionNode[] orderBy = new ExpressionNode[orderByExp.size()]; int i = 0; for (ExpressionNode exp : orderByExp) { exp = exp.clone(); exp.select(REF_LOCATOR, resolver); orderBy[i++] = exp; } obj.orderByExp = List.of(orderBy); obj.aggregationResults = resolver.results; } if ( ! children.isEmpty() ) { Group [] groups = new Group[children.size()]; int i = 0; for (Group child : children) { groups[i++] = child.clone(); } obj.children = List.of(groups); } return obj; } @Override public void visitMembers(ObjectVisitor visitor) { super.visitMembers(visitor); visitor.visit("id", id); visitor.visit("rank", rank); visitor.visit("aggregationresults", aggregationResults); visitor.visit("orderby-idx", orderByIdx); visitor.visit("orderby-exp", orderByExp); visitor.visit("children", children); visitor.visit("tag", tag); } @Override public void selectMembers(ObjectPredicate predicate, ObjectOperation operation) { for (AggregationResult result : aggregationResults) { result.select(predicate, operation); } for (ExpressionNode exp : orderByExp) { exp.select(predicate, operation); } } private enum SortType { UNSORTED, BYRANK, BYID } private static class RefLocator implements ObjectPredicate { @Override public boolean check(Object obj) { return obj instanceof AggregationRefNode; } } private static class RefResolver implements ObjectOperation { List<AggregationResult> results; RefResolver(List<AggregationResult> initial) { this.results = initial; } @Override public void execute(Object obj) { AggregationRefNode ref = (AggregationRefNode)obj; int idx = ref.getIndex(); if (idx < 0) { AggregationResult res = ref.getExpression(); idx = indexOf(res); if (idx < 0) { idx = results.size(); results = add(results, res); } ref.setIndex(idx); } else { ref.setExpression(results.get(idx)); } } int indexOf(AggregationResult lhs) { int prevTag = lhs.getTag(); for (int i = 0, len = 
results.size(); i < len; ++i) { AggregationResult rhs = results.get(i); lhs.setTag(rhs.getTag()); if (lhs.equals(rhs)) { return i; } } lhs.setTag(prevTag); return -1; } } }
class Group extends Identifiable { public static final int classId = registerClass(0x4000 + 90, Group.class); private static final ObjectPredicate REF_LOCATOR = new RefLocator(); private List<Integer> orderByIdx = List.of(); private List<ExpressionNode> orderByExp = List.of(); private List<AggregationResult> aggregationResults = List.of(); private List<Group> children = List.of(); private ResultNode id = null; private double rank; private int tag = -1; private SortType sortType = SortType.UNSORTED; private static <T> List<T> add(List<T> oldList, T obj) { if (oldList.isEmpty()) { return List.of(obj); } if (oldList.size() == 1) { return List.of(oldList.get(0), obj); } List<T> newList = (oldList instanceof ArrayList) ? oldList : new ArrayList<>(oldList); newList.add(obj); return newList; } private static <T> List<T> sort(List<T> list, Comparator<T> cmp) { if (list instanceof ArrayList) { list.sort(cmp); return list; } else { if (list.size() < 2) return list; if (list.size() == 2) { return (cmp.compare(list.get(0), list.get(1)) > 0) ? List.of(list.get(1), list.get(0)) : list; } return list.stream().sorted(cmp).collect(Collectors.toList()); } } /** * This tells you if the children are ranked by the pure relevance or by a more complex expression. * That indicates if the rank score from the child can be used for ordering. * * @return true if it ranked by pure relevance. */ public boolean isRankedByRelevance() { return orderByIdx.isEmpty(); } /** * Merges the content of the given group <b>into</b> this. When this function returns, make sure to call * {@link * * @param firstLevel The first level to merge. * @param currentLevel The current level. * @param rhs The group to merge with. 
*/ public void merge(int firstLevel, int currentLevel, Group rhs) { if (rhs.rank > rank) { rank = rhs.rank; } if (currentLevel >= firstLevel) { for (int i = 0, len = aggregationResults.size(); i < len; ++i) { aggregationResults.get(i).merge(rhs.aggregationResults.get(i)); } } ArrayList<Group> merged = new ArrayList<>(); Iterator<Group> lhsChild = children.iterator(), rhsChild = rhs.children.iterator(); if (lhsChild.hasNext() && rhsChild.hasNext()) { Group lhsGroup = lhsChild.next(); Group rhsGroup = rhsChild.next(); while ((lhsGroup != null) && (rhsGroup != null)) { int cmp = lhsGroup.getId().compareTo(rhsGroup.getId()); if (cmp < 0) { merged.add(lhsGroup); lhsGroup = lhsChild.hasNext() ? lhsChild.next() : null; } else if (cmp > 0) { merged.add(rhsGroup); rhsGroup = rhsChild.hasNext() ? rhsChild.next() : null; } else { lhsGroup.merge(firstLevel, currentLevel + 1, rhsGroup); merged.add(lhsGroup); lhsGroup = lhsChild.hasNext() ? lhsChild.next() : null; rhsGroup = rhsChild.hasNext() ? rhsChild.next() : null; } } if (lhsGroup != null) { merged.add(lhsGroup); } if (rhsGroup != null) { merged.add(rhsGroup); } } while (lhsChild.hasNext()) { merged.add(lhsChild.next()); } while (rhsChild.hasNext()) { merged.add(rhsChild.next()); } children = merged; } private void executeOrderBy() { for (ExpressionNode node : orderByExp) { node.prepare(); node.execute(); } } /** * After merging, this method will prune all levels so that they do not exceed the configured maximum number of * groups per level. * * @param levels The specs of all grouping levels. * @param firstLevel The first level to merge. * @param currentLevel The current level. 
*/ public void postMerge(List<GroupingLevel> levels, int firstLevel, int currentLevel) { if (currentLevel >= firstLevel) { for (AggregationResult result : aggregationResults) { result.postMerge(); } for (ExpressionNode result : orderByExp) { result.execute(); } } if (currentLevel < levels.size()) { int maxGroups = (int)levels.get(currentLevel).getMaxGroups(); for (Group group : children) { group.executeOrderBy(); } if (maxGroups >= 0 && children.size() > maxGroups) { sortChildrenByRank(); children = children.subList(0, maxGroups); sortChildrenById(); } for (Group group : children) { group.postMerge(levels, firstLevel, currentLevel + 1); } } } /** Sorts the children by their id, if they are not sorted already. */ public void sortChildrenById() { if (sortType == SortType.BYID) { return; } children = sort(children, Group::compareId); sortType = SortType.BYID; } /** Sorts the children by their rank, if they are not sorted already. */ public void sortChildrenByRank() { if (sortType == SortType.BYRANK) { return; } children = sort(children, Group::compareRank); sortType = SortType.BYRANK; } /** * Returns the label to use for this group. See comment on {@link * on the rationale of this being a {@link ResultNode}. */ public ResultNode getId() { return id; } /** * Sets the label to use for this group. This is a {@link ResultNode} so that a group can be labeled with * whatever value the classifier expression returns. * * @param id the label to set * @return this, to allow chaining */ public Group setId(ResultNode id) { this.id = id; return this; } /** * Sets the relevancy to use for this group. * * @param rank The rank to set. * @return This, to allow chaining. */ public Group setRank(double rank) { this.rank = rank; return this; } /** Return the rank score of this group. */ public double getRank() { return rank; } /** * Adds a child group to this. * * @param child The group to add. * @return This, to allow chaining. 
*/ public Group addChild(Group child) { if (child == null) { throw new IllegalArgumentException("Child can not be null."); } children = add(children, child); return this; } /** Returns the list of child groups to this. */ public List<Group> getChildren() { return List.copyOf(children); } /** * Returns the tag of this group. This value is set per-level in the grouping request, and then becomes assigned * to each group of that level in the grouping result as they are copied from the prototype. */ public int getTag() { return tag; } /** * Assigns a tag to this group. * * @param tag the numerical tag to set * @return this, to allow chaining */ public Group setTag(int tag) { this.tag = tag; return this; } /** * Returns this group's aggregation results. * * @return the aggregation results */ public List<AggregationResult> getAggregationResults() { return List.copyOf(aggregationResults); } /** * Adds an aggregation result to this group. * * @param result the result to add * @return this, to allow chaining */ public Group addAggregationResult(AggregationResult result) { aggregationResults = add(aggregationResults, result); return this; } /** * Adds an order-by expression to this group. If the expression is an AggregationResult, it will be added to the * list of this group's AggregationResults, and a reference to that expression is added instead. If the * AggregationResult is already present, a reference to THAT result is created instead. * * @param exp the result to add * @param asc true to sort ascending, false to sort descending * @return this, to allow chaining */ public Group addOrderBy(ExpressionNode exp, boolean asc) { if (exp instanceof AggregationResult) { exp = new AggregationRefNode((AggregationResult)exp); } RefResolver refResolver = new RefResolver(aggregationResults); exp.select(REF_LOCATOR, refResolver); aggregationResults = refResolver.results; orderByExp = add(orderByExp, exp); orderByIdx = add(orderByIdx, (asc ? 
1 : -1) * orderByExp.size()); return this; } public List<ExpressionNode> getOrderByExpressions() { return List.copyOf(orderByExp); } private int compareId(Group rhs) { return getId().compareTo(rhs.getId()); } private int compareRank(Group rhs) { long diff = 0; for (int i = 0, m = orderByIdx.size(); (diff == 0) && (i < m); i++) { int rawIndex = orderByIdx.get(i); int index = ((rawIndex < 0) ? -rawIndex : rawIndex) - 1; diff = orderByExp.get(index).getResult().compareTo(rhs.orderByExp.get(index).getResult()); diff = diff * rawIndex; } if (diff < 0) { return -1; } if (diff > 0) { return 1; } return -Double.compare(rank, rhs.rank); } @Override protected int onGetClassId() { return classId; } @Override protected void onSerialize(Serializer buf) { super.onSerialize(buf); serializeOptional(buf, id); buf.putDouble(null, rank); int sz = orderByIdx.size(); buf.putInt(null, sz); for (Integer index : orderByIdx) { buf.putInt(null, index); } int numResults = aggregationResults.size(); buf.putInt(null, numResults); for (AggregationResult a : aggregationResults) { serializeOptional(buf, a); } int numExpressionResults = orderByExp.size(); buf.putInt(null, numExpressionResults); for (ExpressionNode e : orderByExp) { serializeOptional(buf, e); } int numGroups = children.size(); buf.putInt(null, numGroups); for (Group g : children) { g.serializeWithId(buf); } buf.putInt(null, tag); } @Override protected void onDeserialize(Deserializer buf) { super.onDeserialize(buf); id = (ResultNode)deserializeOptional(buf); rank = buf.getDouble(null); orderByIdx = List.of(); int orderByCount = buf.getInt(null); if (orderByCount > 0) { Integer [] idxes = new Integer[orderByCount]; for (int i = 0; i < orderByCount; i++) { idxes[i] = buf.getInt(null); } orderByIdx = List.of(idxes); } int numResults = buf.getInt(null); if (numResults > 0) { AggregationResult [] results = new AggregationResult[numResults]; for (int i = 0; i < numResults; i++) { results[i] = (AggregationResult) deserializeOptional(buf); 
} aggregationResults = List.of(results); } else { aggregationResults = List.of(); } int numExpressionResults = buf.getInt(null); if (numExpressionResults > 0) { RefResolver resolver = new RefResolver(aggregationResults); ExpressionNode[] orderBy = new ExpressionNode[numExpressionResults]; for (int i = 0; i < numExpressionResults; i++) { ExpressionNode exp = (ExpressionNode) deserializeOptional(buf); exp.select(REF_LOCATOR, resolver); orderBy[i] = exp; } aggregationResults = resolver.results; orderByExp = List.of(orderBy); } else { orderByExp = List.of(); } int numGroups = buf.getInt(null); if (numGroups > 0) { Group [] groups = new Group[numGroups]; for (int i = 0; i < numGroups; i++) { Group g = new Group(); g.deserializeWithId(buf); groups[i] = g; } children = List.of(groups); } else { children = List.of(); } tag = buf.getInt(null); } @Override public int hashCode() { return super.hashCode() + aggregationResults.hashCode() + children.hashCode(); } @Override public boolean equals(Object obj) { if (obj == this) return true; if (!super.equals(obj)) return false; Group rhs = (Group)obj; if (!equals(id, rhs.id)) return false; if (rank != rhs.rank) return false; if (!aggregationResults.equals(rhs.aggregationResults)) return false; if (!orderByIdx.equals(rhs.orderByIdx)) return false; if (!orderByExp.equals(rhs.orderByExp)) return false; if (!children.equals(rhs.children)) return false; return true; } @Override public Group clone() { Group obj = (Group)super.clone(); if (id != null) { obj.id = (ResultNode)id.clone(); } if ( ! aggregationResults.isEmpty() ) { AggregationResult [] results = new AggregationResult[aggregationResults.size()]; int i = 0; for (AggregationResult result : aggregationResults) { results[i++] = result.clone(); } obj.aggregationResults = List.of(results); } obj.orderByIdx = List.copyOf(orderByIdx); if ( ! 
orderByExp.isEmpty()) { obj.orderByExp = new ArrayList<>(); RefResolver resolver = new RefResolver(obj.aggregationResults); ExpressionNode[] orderBy = new ExpressionNode[orderByExp.size()]; int i = 0; for (ExpressionNode exp : orderByExp) { exp = exp.clone(); exp.select(REF_LOCATOR, resolver); orderBy[i++] = exp; } obj.orderByExp = List.of(orderBy); obj.aggregationResults = resolver.results; } if ( ! children.isEmpty() ) { Group [] groups = new Group[children.size()]; int i = 0; for (Group child : children) { groups[i++] = child.clone(); } obj.children = List.of(groups); } return obj; } @Override public void visitMembers(ObjectVisitor visitor) { super.visitMembers(visitor); visitor.visit("id", id); visitor.visit("rank", rank); visitor.visit("aggregationresults", aggregationResults); visitor.visit("orderby-idx", orderByIdx); visitor.visit("orderby-exp", orderByExp); visitor.visit("children", children); visitor.visit("tag", tag); } @Override public void selectMembers(ObjectPredicate predicate, ObjectOperation operation) { for (AggregationResult result : aggregationResults) { result.select(predicate, operation); } for (ExpressionNode exp : orderByExp) { exp.select(predicate, operation); } } private enum SortType { UNSORTED, BYRANK, BYID } private static class RefLocator implements ObjectPredicate { @Override public boolean check(Object obj) { return obj instanceof AggregationRefNode; } } private static class RefResolver implements ObjectOperation { List<AggregationResult> results; RefResolver(List<AggregationResult> initial) { this.results = initial; } @Override public void execute(Object obj) { AggregationRefNode ref = (AggregationRefNode)obj; int idx = ref.getIndex(); if (idx < 0) { AggregationResult res = ref.getExpression(); idx = indexOf(res); if (idx < 0) { idx = results.size(); results = add(results, res); } ref.setIndex(idx); } else { ref.setExpression(results.get(idx)); } } int indexOf(AggregationResult lhs) { int prevTag = lhs.getTag(); for (int i = 0, len = 
results.size(); i < len; ++i) { AggregationResult rhs = results.get(i); lhs.setTag(rhs.getTag()); if (lhs.equals(rhs)) { return i; } } lhs.setTag(prevTag); return -1; } } }
Oooh :)
public ControllerMaintenance(Controller controller, Metric metric) { Intervals intervals = new Intervals(controller.system()); deploymentExpirer = new DeploymentExpirer(controller, intervals.defaultInterval); deploymentIssueReporter = new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval); metricsReporter = new MetricsReporter(controller, metric); outstandingChangeDeployer = new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer); versionStatusUpdater = new VersionStatusUpdater(controller, intervals.versionStatusUpdater); upgrader = new Upgrader(controller, intervals.defaultInterval); readyJobsTrigger = new ReadyJobsTrigger(controller, intervals.readyJobsTrigger); deploymentMetricsMaintainer = new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer); applicationOwnershipConfirmer = new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues()); systemUpgrader = new SystemUpgrader(controller, intervals.systemUpgrader); jobRunner = new JobRunner(controller, intervals.jobRunner); osUpgraders = osUpgraders(controller, intervals.osUpgrader); osVersionStatusUpdater = new OsVersionStatusUpdater(controller, intervals.defaultInterval); contactInformationMaintainer = new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer); nameServiceDispatcher = new NameServiceDispatcher(controller, intervals.nameServiceDispatcher); costReportMaintainer = new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer()); resourceMeterMaintainer = new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService()); cloudEventReporter = new CloudEventReporter(controller, intervals.cloudEventReporter, metric); rotationStatusUpdater = new RotationStatusUpdater(controller, 
intervals.defaultInterval); resourceTagMaintainer = new ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger()); systemRoutingPolicyMaintainer = new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer); applicationMetaDataGarbageCollector = new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector); containerImageExpirer = new ContainerImageExpirer(controller, intervals.containerImageExpirer); hostSwitchUpdater = new HostSwitchUpdater(controller, intervals.hostSwitchUpdater); reindexingTriggerer = new ReindexingTriggerer(controller, intervals.reindexingTriggerer); }
reindexingTriggerer = new ReindexingTriggerer(controller, intervals.reindexingTriggerer);
public ControllerMaintenance(Controller controller, Metric metric) { Intervals intervals = new Intervals(controller.system()); upgrader = new Upgrader(controller, intervals.defaultInterval); maintainers.add(upgrader); maintainers.addAll(osUpgraders(controller, intervals.osUpgrader)); maintainers.add(new DeploymentExpirer(controller, intervals.defaultInterval)); maintainers.add(new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval)); maintainers.add(new MetricsReporter(controller, metric)); maintainers.add(new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer)); maintainers.add(new VersionStatusUpdater(controller, intervals.versionStatusUpdater)); maintainers.add(new ReadyJobsTrigger(controller, intervals.readyJobsTrigger)); maintainers.add(new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer)); maintainers.add(new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues())); maintainers.add(new SystemUpgrader(controller, intervals.systemUpgrader)); maintainers.add(new JobRunner(controller, intervals.jobRunner)); maintainers.add(new OsVersionStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer)); maintainers.add(new NameServiceDispatcher(controller, intervals.nameServiceDispatcher)); maintainers.add(new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer())); maintainers.add(new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService())); maintainers.add(new CloudEventReporter(controller, intervals.cloudEventReporter, metric)); maintainers.add(new RotationStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new 
ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger())); maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer)); maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector)); maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer)); maintainers.add(new HostSwitchUpdater(controller, intervals.hostSwitchUpdater)); maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer)); }
class ControllerMaintenance extends AbstractComponent { private final DeploymentExpirer deploymentExpirer; private final DeploymentIssueReporter deploymentIssueReporter; private final MetricsReporter metricsReporter; private final OutstandingChangeDeployer outstandingChangeDeployer; private final VersionStatusUpdater versionStatusUpdater; private final Upgrader upgrader; private final ReadyJobsTrigger readyJobsTrigger; private final DeploymentMetricsMaintainer deploymentMetricsMaintainer; private final ApplicationOwnershipConfirmer applicationOwnershipConfirmer; private final SystemUpgrader systemUpgrader; private final List<OsUpgrader> osUpgraders; private final OsVersionStatusUpdater osVersionStatusUpdater; private final JobRunner jobRunner; private final ContactInformationMaintainer contactInformationMaintainer; private final CostReportMaintainer costReportMaintainer; private final ResourceMeterMaintainer resourceMeterMaintainer; private final NameServiceDispatcher nameServiceDispatcher; private final CloudEventReporter cloudEventReporter; private final RotationStatusUpdater rotationStatusUpdater; private final ResourceTagMaintainer resourceTagMaintainer; private final SystemRoutingPolicyMaintainer systemRoutingPolicyMaintainer; private final ApplicationMetaDataGarbageCollector applicationMetaDataGarbageCollector; private final ContainerImageExpirer containerImageExpirer; private final HostSwitchUpdater hostSwitchUpdater; private final ReindexingTriggerer reindexingTriggerer; @Inject @SuppressWarnings("unused") public Upgrader upgrader() { return upgrader; } @Override public void deconstruct() { deploymentExpirer.close(); deploymentIssueReporter.close(); metricsReporter.close(); outstandingChangeDeployer.close(); versionStatusUpdater.close(); upgrader.close(); readyJobsTrigger.close(); deploymentMetricsMaintainer.close(); applicationOwnershipConfirmer.close(); systemUpgrader.close(); osUpgraders.forEach(ControllerMaintainer::close); 
osVersionStatusUpdater.close(); jobRunner.close(); contactInformationMaintainer.close(); costReportMaintainer.close(); resourceMeterMaintainer.close(); nameServiceDispatcher.close(); cloudEventReporter.close(); rotationStatusUpdater.close(); resourceTagMaintainer.close(); systemRoutingPolicyMaintainer.close(); applicationMetaDataGarbageCollector.close(); containerImageExpirer.close(); hostSwitchUpdater.close(); } /** Create one OS upgrader per cloud found in the zone registry of controller */ private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) { return controller.zoneRegistry().zones().controllerUpgraded().zones().stream() .map(ZoneApi::getCloudName) .distinct() .sorted() .map(cloud -> new OsUpgrader(controller, interval, cloud)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } private static class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; public Intervals(SystemName system) { this.system = 
Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); } private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } } }
class ControllerMaintenance extends AbstractComponent { private final Upgrader upgrader; private final List<Maintainer> maintainers = new CopyOnWriteArrayList<>(); @Inject @SuppressWarnings("unused") public Upgrader upgrader() { return upgrader; } @Override public void deconstruct() { maintainers.forEach(Maintainer::shutdown); maintainers.forEach(Maintainer::awaitShutdown); } /** Create one OS upgrader per cloud found in the zone registry of controller */ private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) { return controller.zoneRegistry().zones().controllerUpgraded().zones().stream() .map(ZoneApi::getCloudName) .distinct() .sorted() .map(cloud -> new OsUpgrader(controller, interval, cloud)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } private static class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = 
duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); } private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } } }
Better, but why do we need both if close calls shutdown when necessary?
public void deconstruct() { maintainers.forEach(Maintainer::shutdown); maintainers.forEach(Maintainer::close); }
maintainers.forEach(Maintainer::close);
public void deconstruct() { maintainers.forEach(Maintainer::shutdown); maintainers.forEach(Maintainer::awaitShutdown); }
class ControllerMaintenance extends AbstractComponent { private final Upgrader upgrader; private final List<Maintainer> maintainers = new CopyOnWriteArrayList<>(); @Inject @SuppressWarnings("unused") public ControllerMaintenance(Controller controller, Metric metric) { Intervals intervals = new Intervals(controller.system()); upgrader = new Upgrader(controller, intervals.defaultInterval); maintainers.add(upgrader); maintainers.addAll(osUpgraders(controller, intervals.osUpgrader)); maintainers.add(new DeploymentExpirer(controller, intervals.defaultInterval)); maintainers.add(new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval)); maintainers.add(new MetricsReporter(controller, metric)); maintainers.add(new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer)); maintainers.add(new VersionStatusUpdater(controller, intervals.versionStatusUpdater)); maintainers.add(new ReadyJobsTrigger(controller, intervals.readyJobsTrigger)); maintainers.add(new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer)); maintainers.add(new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues())); maintainers.add(new SystemUpgrader(controller, intervals.systemUpgrader)); maintainers.add(new JobRunner(controller, intervals.jobRunner)); maintainers.add(new OsVersionStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer)); maintainers.add(new NameServiceDispatcher(controller, intervals.nameServiceDispatcher)); maintainers.add(new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer())); maintainers.add(new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService())); maintainers.add(new 
CloudEventReporter(controller, intervals.cloudEventReporter, metric)); maintainers.add(new RotationStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger())); maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer)); maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector)); maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer)); maintainers.add(new HostSwitchUpdater(controller, intervals.hostSwitchUpdater)); maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer)); } public Upgrader upgrader() { return upgrader; } @Override /** Create one OS upgrader per cloud found in the zone registry of controller */ private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) { return controller.zoneRegistry().zones().controllerUpgraded().zones().stream() .map(ZoneApi::getCloudName) .distinct() .sorted() .map(cloud -> new OsUpgrader(controller, interval, cloud)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } private static class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; 
private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); } private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } } }
class ControllerMaintenance extends AbstractComponent { private final Upgrader upgrader; private final List<Maintainer> maintainers = new CopyOnWriteArrayList<>(); @Inject @SuppressWarnings("unused") public ControllerMaintenance(Controller controller, Metric metric) { Intervals intervals = new Intervals(controller.system()); upgrader = new Upgrader(controller, intervals.defaultInterval); maintainers.add(upgrader); maintainers.addAll(osUpgraders(controller, intervals.osUpgrader)); maintainers.add(new DeploymentExpirer(controller, intervals.defaultInterval)); maintainers.add(new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval)); maintainers.add(new MetricsReporter(controller, metric)); maintainers.add(new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer)); maintainers.add(new VersionStatusUpdater(controller, intervals.versionStatusUpdater)); maintainers.add(new ReadyJobsTrigger(controller, intervals.readyJobsTrigger)); maintainers.add(new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer)); maintainers.add(new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues())); maintainers.add(new SystemUpgrader(controller, intervals.systemUpgrader)); maintainers.add(new JobRunner(controller, intervals.jobRunner)); maintainers.add(new OsVersionStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer)); maintainers.add(new NameServiceDispatcher(controller, intervals.nameServiceDispatcher)); maintainers.add(new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer())); maintainers.add(new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService())); maintainers.add(new 
CloudEventReporter(controller, intervals.cloudEventReporter, metric)); maintainers.add(new RotationStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger())); maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer)); maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector)); maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer)); maintainers.add(new HostSwitchUpdater(controller, intervals.hostSwitchUpdater)); maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer)); } public Upgrader upgrader() { return upgrader; } @Override /** Create one OS upgrader per cloud found in the zone registry of controller */ private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) { return controller.zoneRegistry().zones().controllerUpgraded().zones().stream() .map(ZoneApi::getCloudName) .distinct() .sorted() .map(cloud -> new OsUpgrader(controller, interval, cloud)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } private static class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; 
private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); } private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } } }
If it's to initiate shutdown on everything at once and then wait, I suggest renaming `close`. `awaitShutdown`?
public void deconstruct() { maintainers.forEach(Maintainer::shutdown); maintainers.forEach(Maintainer::close); }
maintainers.forEach(Maintainer::close);
public void deconstruct() { maintainers.forEach(Maintainer::shutdown); maintainers.forEach(Maintainer::awaitShutdown); }
class ControllerMaintenance extends AbstractComponent { private final Upgrader upgrader; private final List<Maintainer> maintainers = new CopyOnWriteArrayList<>(); @Inject @SuppressWarnings("unused") public ControllerMaintenance(Controller controller, Metric metric) { Intervals intervals = new Intervals(controller.system()); upgrader = new Upgrader(controller, intervals.defaultInterval); maintainers.add(upgrader); maintainers.addAll(osUpgraders(controller, intervals.osUpgrader)); maintainers.add(new DeploymentExpirer(controller, intervals.defaultInterval)); maintainers.add(new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval)); maintainers.add(new MetricsReporter(controller, metric)); maintainers.add(new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer)); maintainers.add(new VersionStatusUpdater(controller, intervals.versionStatusUpdater)); maintainers.add(new ReadyJobsTrigger(controller, intervals.readyJobsTrigger)); maintainers.add(new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer)); maintainers.add(new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues())); maintainers.add(new SystemUpgrader(controller, intervals.systemUpgrader)); maintainers.add(new JobRunner(controller, intervals.jobRunner)); maintainers.add(new OsVersionStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer)); maintainers.add(new NameServiceDispatcher(controller, intervals.nameServiceDispatcher)); maintainers.add(new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer())); maintainers.add(new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService())); maintainers.add(new 
CloudEventReporter(controller, intervals.cloudEventReporter, metric)); maintainers.add(new RotationStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger())); maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer)); maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector)); maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer)); maintainers.add(new HostSwitchUpdater(controller, intervals.hostSwitchUpdater)); maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer)); } public Upgrader upgrader() { return upgrader; } @Override /** Create one OS upgrader per cloud found in the zone registry of controller */ private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) { return controller.zoneRegistry().zones().controllerUpgraded().zones().stream() .map(ZoneApi::getCloudName) .distinct() .sorted() .map(cloud -> new OsUpgrader(controller, interval, cloud)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } private static class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; 
private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); } private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } } }
class ControllerMaintenance extends AbstractComponent { private final Upgrader upgrader; private final List<Maintainer> maintainers = new CopyOnWriteArrayList<>(); @Inject @SuppressWarnings("unused") public ControllerMaintenance(Controller controller, Metric metric) { Intervals intervals = new Intervals(controller.system()); upgrader = new Upgrader(controller, intervals.defaultInterval); maintainers.add(upgrader); maintainers.addAll(osUpgraders(controller, intervals.osUpgrader)); maintainers.add(new DeploymentExpirer(controller, intervals.defaultInterval)); maintainers.add(new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval)); maintainers.add(new MetricsReporter(controller, metric)); maintainers.add(new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer)); maintainers.add(new VersionStatusUpdater(controller, intervals.versionStatusUpdater)); maintainers.add(new ReadyJobsTrigger(controller, intervals.readyJobsTrigger)); maintainers.add(new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer)); maintainers.add(new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues())); maintainers.add(new SystemUpgrader(controller, intervals.systemUpgrader)); maintainers.add(new JobRunner(controller, intervals.jobRunner)); maintainers.add(new OsVersionStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer)); maintainers.add(new NameServiceDispatcher(controller, intervals.nameServiceDispatcher)); maintainers.add(new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer())); maintainers.add(new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService())); maintainers.add(new 
CloudEventReporter(controller, intervals.cloudEventReporter, metric)); maintainers.add(new RotationStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger())); maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer)); maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector)); maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer)); maintainers.add(new HostSwitchUpdater(controller, intervals.hostSwitchUpdater)); maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer)); } public Upgrader upgrader() { return upgrader; } @Override /** Create one OS upgrader per cloud found in the zone registry of controller */ private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) { return controller.zoneRegistry().zones().controllerUpgraded().zones().stream() .map(ZoneApi::getCloudName) .distinct() .sorted() .map(cloud -> new OsUpgrader(controller, interval, cloud)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } private static class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; 
private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); } private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } } }
Good idea.
public void deconstruct() { maintainers.forEach(Maintainer::shutdown); maintainers.forEach(Maintainer::close); }
maintainers.forEach(Maintainer::close);
public void deconstruct() { maintainers.forEach(Maintainer::shutdown); maintainers.forEach(Maintainer::awaitShutdown); }
class ControllerMaintenance extends AbstractComponent { private final Upgrader upgrader; private final List<Maintainer> maintainers = new CopyOnWriteArrayList<>(); @Inject @SuppressWarnings("unused") public ControllerMaintenance(Controller controller, Metric metric) { Intervals intervals = new Intervals(controller.system()); upgrader = new Upgrader(controller, intervals.defaultInterval); maintainers.add(upgrader); maintainers.addAll(osUpgraders(controller, intervals.osUpgrader)); maintainers.add(new DeploymentExpirer(controller, intervals.defaultInterval)); maintainers.add(new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval)); maintainers.add(new MetricsReporter(controller, metric)); maintainers.add(new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer)); maintainers.add(new VersionStatusUpdater(controller, intervals.versionStatusUpdater)); maintainers.add(new ReadyJobsTrigger(controller, intervals.readyJobsTrigger)); maintainers.add(new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer)); maintainers.add(new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues())); maintainers.add(new SystemUpgrader(controller, intervals.systemUpgrader)); maintainers.add(new JobRunner(controller, intervals.jobRunner)); maintainers.add(new OsVersionStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer)); maintainers.add(new NameServiceDispatcher(controller, intervals.nameServiceDispatcher)); maintainers.add(new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer())); maintainers.add(new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService())); maintainers.add(new 
CloudEventReporter(controller, intervals.cloudEventReporter, metric)); maintainers.add(new RotationStatusUpdater(controller, intervals.defaultInterval)); maintainers.add(new ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger())); maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer)); maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector)); maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer)); maintainers.add(new HostSwitchUpdater(controller, intervals.hostSwitchUpdater)); maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer)); } public Upgrader upgrader() { return upgrader; } @Override /** Create one OS upgrader per cloud found in the zone registry of controller */ private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) { return controller.zoneRegistry().zones().controllerUpgraded().zones().stream() .map(ZoneApi::getCloudName) .distinct() .sorted() .map(cloud -> new OsUpgrader(controller, interval, cloud)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } private static class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; 
private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); } private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } } }
/** Creates, owns and registers the set of maintenance jobs ({@link Maintainer}s) run by the controller. */
class ControllerMaintenance extends AbstractComponent {

    private final Upgrader upgrader;

    // CopyOnWriteArrayList: presumably read concurrently while jobs are registered — confirm
    private final List<Maintainer> maintainers = new CopyOnWriteArrayList<>();

    @Inject
    @SuppressWarnings("unused") // instantiated by the container via @Inject
    public ControllerMaintenance(Controller controller, Metric metric) {
        Intervals intervals = new Intervals(controller.system());
        upgrader = new Upgrader(controller, intervals.defaultInterval);
        maintainers.add(upgrader);
        maintainers.addAll(osUpgraders(controller, intervals.osUpgrader));
        maintainers.add(new DeploymentExpirer(controller, intervals.defaultInterval));
        maintainers.add(new DeploymentIssueReporter(controller, controller.serviceRegistry().deploymentIssues(), intervals.defaultInterval));
        maintainers.add(new MetricsReporter(controller, metric));
        maintainers.add(new OutstandingChangeDeployer(controller, intervals.outstandingChangeDeployer));
        maintainers.add(new VersionStatusUpdater(controller, intervals.versionStatusUpdater));
        maintainers.add(new ReadyJobsTrigger(controller, intervals.readyJobsTrigger));
        maintainers.add(new DeploymentMetricsMaintainer(controller, intervals.deploymentMetricsMaintainer));
        maintainers.add(new ApplicationOwnershipConfirmer(controller, intervals.applicationOwnershipConfirmer, controller.serviceRegistry().ownershipIssues()));
        maintainers.add(new SystemUpgrader(controller, intervals.systemUpgrader));
        maintainers.add(new JobRunner(controller, intervals.jobRunner));
        maintainers.add(new OsVersionStatusUpdater(controller, intervals.defaultInterval));
        maintainers.add(new ContactInformationMaintainer(controller, intervals.contactInformationMaintainer));
        maintainers.add(new NameServiceDispatcher(controller, intervals.nameServiceDispatcher));
        maintainers.add(new CostReportMaintainer(controller, intervals.costReportMaintainer, controller.serviceRegistry().costReportConsumer()));
        maintainers.add(new ResourceMeterMaintainer(controller, intervals.resourceMeterMaintainer, metric, controller.serviceRegistry().meteringService()));
        maintainers.add(new CloudEventReporter(controller, intervals.cloudEventReporter, metric));
        maintainers.add(new RotationStatusUpdater(controller, intervals.defaultInterval));
        maintainers.add(new ResourceTagMaintainer(controller, intervals.resourceTagMaintainer, controller.serviceRegistry().resourceTagger()));
        maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer));
        maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector));
        maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer));
        maintainers.add(new HostSwitchUpdater(controller, intervals.hostSwitchUpdater));
        maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer));
    }

    public Upgrader upgrader() { return upgrader; }

    // NOTE(review): the original source had a dangling @Override immediately before the private
    // static method below. @Override is illegal on a private static method (nothing can be
    // overridden), so the stray annotation is removed. The overridden lifecycle method it once
    // belonged to (e.g. deconstruct()) appears to have been lost — confirm against version history.

    /** Create one OS upgrader per cloud found in the zone registry of controller */
    private static List<OsUpgrader> osUpgraders(Controller controller, Duration interval) {
        return controller.zoneRegistry().zones().controllerUpgraded().zones().stream()
                         .map(ZoneApi::getCloudName)
                         .distinct()
                         .sorted()
                         .map(cloud -> new OsUpgrader(controller, interval, cloud))
                         .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

    /** The interval at which each maintenance job runs, adjusted for the system this runs in. */
    private static class Intervals {

        /** In CD systems, no job waits longer than this between runs. */
        private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1);

        private final SystemName system;

        private final Duration defaultInterval;
        private final Duration outstandingChangeDeployer;
        private final Duration versionStatusUpdater;
        private final Duration readyJobsTrigger;
        private final Duration deploymentMetricsMaintainer;
        private final Duration applicationOwnershipConfirmer;
        private final Duration systemUpgrader;
        private final Duration jobRunner;
        private final Duration osUpgrader;
        private final Duration contactInformationMaintainer;
        private final Duration nameServiceDispatcher;
        private final Duration costReportMaintainer;
        private final Duration resourceMeterMaintainer;
        private final Duration cloudEventReporter;
        private final Duration resourceTagMaintainer;
        private final Duration systemRoutingPolicyMaintainer;
        private final Duration applicationMetaDataGarbageCollector;
        private final Duration containerImageExpirer;
        private final Duration hostSwitchUpdater;
        private final Duration reindexingTriggerer;

        public Intervals(SystemName system) {
            this.system = Objects.requireNonNull(system);
            // Run more frequently in CD and dev systems
            this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES);
            this.outstandingChangeDeployer = duration(3, MINUTES);
            this.versionStatusUpdater = duration(3, MINUTES);
            this.readyJobsTrigger = duration(1, MINUTES);
            this.deploymentMetricsMaintainer = duration(5, MINUTES);
            this.applicationOwnershipConfirmer = duration(12, HOURS);
            this.systemUpgrader = duration(1, MINUTES);
            this.jobRunner = duration(90, SECONDS);
            this.osUpgrader = duration(1, MINUTES);
            this.contactInformationMaintainer = duration(12, HOURS);
            this.nameServiceDispatcher = duration(10, SECONDS);
            this.costReportMaintainer = duration(2, HOURS);
            this.resourceMeterMaintainer = duration(1, MINUTES);
            this.cloudEventReporter = duration(30, MINUTES);
            this.resourceTagMaintainer = duration(30, MINUTES);
            this.systemRoutingPolicyMaintainer = duration(10, MINUTES);
            this.applicationMetaDataGarbageCollector = duration(12, HOURS);
            this.containerImageExpirer = duration(2, HOURS);
            this.hostSwitchUpdater = duration(12, HOURS);
            this.reindexingTriggerer = duration(1, HOURS);
        }

        /** Returns the given duration, capped at {@link #MAX_CD_INTERVAL} when running in a CD system. */
        private Duration duration(long amount, TemporalUnit unit) {
            Duration duration = Duration.of(amount, unit);
            if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) {
                return MAX_CD_INTERVAL;
            }
            return duration;
        }

    }

}
Consider simplifying to `Math.max(1, minimumMeasurements)`.
/** Returns the minimum measurements per node (average) we require to give autoscaling advice. */
private int minimumMeasurementsPerNode(Duration scalingWindow) {
    // Assumes roughly one measurement per 5 minutes — confirm against the metrics sampling interval.
    // Require 80% of the expected sample count for the window, but always at least 1.
    long expectedMeasurements = scalingWindow.toMinutes() / 5;
    return (int) Math.max(1, Math.round(0.8 * expectedMeasurements));
}
return (int)minimumMeasurements;
/** Returns the minimum measurements per node (average) we require to give autoscaling advice. */
private int minimumMeasurementsPerNode(Duration scalingWindow) {
    // Assumes roughly one measurement per 5 minutes — confirm against the metrics sampling interval.
    // Require 80% of the expected sample count for the window, but always at least 1.
    long expectedMeasurements = scalingWindow.toMinutes() / 5;
    return (int) Math.max(1, Math.round(0.8 * expectedMeasurements));
}
class Autoscaler { /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final MetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice suggest(Cluster cluster, NodeList clusterNodes) { return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive()); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice autoscale(Cluster cluster, NodeList clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Advice.none("Autoscaling is not enabled"); return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive()); } private Advice autoscale(Cluster cluster, NodeList clusterNodes, Limits limits, boolean exclusive) { if ( ! 
stable(clusterNodes, nodeRepository)) return Advice.none("Cluster change in progress"); Duration scalingWindow = scalingWindow(clusterNodes.clusterSpec(), cluster); ClusterTimeseries clusterTimeseries = new ClusterTimeseries(nodeRepository.clock().instant().minus(scalingWindow), cluster, clusterNodes, metricsDb); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive()); int measurementsPerNode = clusterTimeseries.measurementsPerNode(); if (measurementsPerNode < minimumMeasurementsPerNode(scalingWindow)) return Advice.none("Collecting more data before making new scaling decisions" + " (has " + measurementsPerNode + " measurements per node but need " + minimumMeasurementsPerNode(scalingWindow) + ")"); int nodesMeasured = clusterTimeseries.nodesMeasured(); if (nodesMeasured != clusterNodes.size()) return Advice.none("Collecting more data before making new scaling decisions" + "(has measurements from " + nodesMeasured + " but need from " + clusterNodes.size() + ")"); double cpuLoad = clusterTimeseries.averageLoad(Resource.cpu); double memoryLoad = clusterTimeseries.averageLoad(Resource.memory); double diskLoad = clusterTimeseries.averageLoad(Resource.disk); var target = ResourceTarget.idealLoad(cpuLoad, memoryLoad, diskLoad, currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Advice.dontScale("No allocation changes are possible within configured limits"); if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale("Cluster is ideally scaled (within configured limits)"); if (scaledIn(scalingWindow, cluster)) return Advice.dontScale("Won't autoscale now: Less than " + scalingWindow + " since last rescaling"); if (isDownscaling(bestAllocation.get(), currentAllocation) && scaledIn(scalingWindow.multipliedBy(3), cluster)) return 
Advice.dontScale("Waiting " + scalingWindow.multipliedBy(3) + " since last rescaling before reducing resources"); return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources()); } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold; } /** Returns true if this reduces total resources in any dimension */ private boolean isDownscaling(AllocatableClusterResources target, AllocatableClusterResources current) { NodeResources targetTotal = target.toAdvertisedClusterResources().totalResources(); NodeResources currentTotal = current.toAdvertisedClusterResources().totalResources(); return ! targetTotal.justNumbers().satisfies(currentTotal.justNumbers()); } private boolean scaledIn(Duration delay, Cluster cluster) { return cluster.lastScalingEvent().map(event -> event.at()).orElse(Instant.MIN) .isAfter(nodeRepository.clock().instant().minus(delay)); } /** The duration of the window we need to consider to make a scaling decision. 
See also minimumMeasurementsPerNode */ private Duration scalingWindow(ClusterSpec clusterSpec, Cluster cluster) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(event.duration().get()); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative()) return Duration.ofHours(12); if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative()) return Duration.ofMinutes(5); return predictedDuration; } } static Duration maxScalingWindow() { return Duration.ofHours(48); } /** Returns the minimum measurements per node (average) we require to give autoscaling advice.*/ public static boolean stable(NodeList nodes, NodeRepository nodeRepository) { if (nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable())) return false; if (nodeRepository.getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).size() > 0) return false; return true; } public static class Advice { private final boolean present; private final Optional<ClusterResources> target; private final String reason; private Advice(Optional<ClusterResources> target, boolean present, String reason) { this.target = target; this.present = present; this.reason = Objects.requireNonNull(reason); } /** * Returns the autoscaling target that should be set by this advice. * This is empty if the advice is to keep the current allocation. */ public Optional<ClusterResources> target() { return target; } /** True if this does not provide any advice */ public boolean isEmpty() { return ! 
present; } /** True if this provides advice (which may be to keep the current allocation) */ public boolean isPresent() { return present; } /** The reason for this advice */ public String reason() { return reason; } private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); } private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); } private static Advice scaleTo(ClusterResources target) { return new Advice(Optional.of(target), true, "Scaling due to load changes"); } @Override public String toString() { return "autoscaling advice: " + (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None"); } } }
class Autoscaler { /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final MetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice suggest(Cluster cluster, NodeList clusterNodes) { return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive()); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice autoscale(Cluster cluster, NodeList clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Advice.none("Autoscaling is not enabled"); return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive()); } private Advice autoscale(Cluster cluster, NodeList clusterNodes, Limits limits, boolean exclusive) { if ( ! 
stable(clusterNodes, nodeRepository)) return Advice.none("Cluster change in progress"); Duration scalingWindow = scalingWindow(clusterNodes.clusterSpec(), cluster); ClusterTimeseries clusterTimeseries = new ClusterTimeseries(nodeRepository.clock().instant().minus(scalingWindow), cluster, clusterNodes, metricsDb); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive()); int measurementsPerNode = clusterTimeseries.measurementsPerNode(); if (measurementsPerNode < minimumMeasurementsPerNode(scalingWindow)) return Advice.none("Collecting more data before making new scaling decisions" + " (has " + measurementsPerNode + " measurements per node but need " + minimumMeasurementsPerNode(scalingWindow) + ")"); int nodesMeasured = clusterTimeseries.nodesMeasured(); if (nodesMeasured != clusterNodes.size()) return Advice.none("Collecting more data before making new scaling decisions" + "(has measurements from " + nodesMeasured + " but need from " + clusterNodes.size() + ")"); double cpuLoad = clusterTimeseries.averageLoad(Resource.cpu); double memoryLoad = clusterTimeseries.averageLoad(Resource.memory); double diskLoad = clusterTimeseries.averageLoad(Resource.disk); var target = ResourceTarget.idealLoad(cpuLoad, memoryLoad, diskLoad, currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Advice.dontScale("No allocation changes are possible within configured limits"); if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale("Cluster is ideally scaled (within configured limits)"); if (scaledIn(scalingWindow, cluster)) return Advice.dontScale("Won't autoscale now: Less than " + scalingWindow + " since last rescaling"); if (isDownscaling(bestAllocation.get(), currentAllocation) && scaledIn(scalingWindow.multipliedBy(3), cluster)) return 
Advice.dontScale("Waiting " + scalingWindow.multipliedBy(3) + " since last rescaling before reducing resources"); return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources()); } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold; } /** Returns true if this reduces total resources in any dimension */ private boolean isDownscaling(AllocatableClusterResources target, AllocatableClusterResources current) { NodeResources targetTotal = target.toAdvertisedClusterResources().totalResources(); NodeResources currentTotal = current.toAdvertisedClusterResources().totalResources(); return ! targetTotal.justNumbers().satisfies(currentTotal.justNumbers()); } private boolean scaledIn(Duration delay, Cluster cluster) { return cluster.lastScalingEvent().map(event -> event.at()).orElse(Instant.MIN) .isAfter(nodeRepository.clock().instant().minus(delay)); } /** The duration of the window we need to consider to make a scaling decision. 
See also minimumMeasurementsPerNode */ private Duration scalingWindow(ClusterSpec clusterSpec, Cluster cluster) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(event.duration().get()); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative()) return Duration.ofHours(12); if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative()) return Duration.ofMinutes(5); return predictedDuration; } } static Duration maxScalingWindow() { return Duration.ofHours(48); } /** Returns the minimum measurements per node (average) we require to give autoscaling advice.*/ public static boolean stable(NodeList nodes, NodeRepository nodeRepository) { if (nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable())) return false; if (nodeRepository.getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).size() > 0) return false; return true; } public static class Advice { private final boolean present; private final Optional<ClusterResources> target; private final String reason; private Advice(Optional<ClusterResources> target, boolean present, String reason) { this.target = target; this.present = present; this.reason = Objects.requireNonNull(reason); } /** * Returns the autoscaling target that should be set by this advice. * This is empty if the advice is to keep the current allocation. */ public Optional<ClusterResources> target() { return target; } /** True if this does not provide any advice */ public boolean isEmpty() { return ! 
present; } /** True if this provides advice (which may be to keep the current allocation) */ public boolean isPresent() { return present; } /** The reason for this advice */ public String reason() { return reason; } private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); } private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); } private static Advice scaleTo(ClusterResources target) { return new Advice(Optional.of(target), true, "Scaling due to load changes"); } @Override public String toString() { return "autoscaling advice: " + (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None"); } } }
Unmatched `(`. 😄
/** Returns the minimum measurements per node (average) we require to give autoscaling advice. */
private int minimumMeasurementsPerNode(Duration scalingWindow) {
    // Assumes roughly one measurement per 5 minutes — confirm against the metrics sampling interval.
    // Require 80% of the expected sample count for the window, but always at least 1.
    long expectedMeasurements = scalingWindow.toMinutes() / 5;
    return (int) Math.max(1, Math.round(0.8 * expectedMeasurements));
}
/** Returns the minimum measurements per node (average) we require to give autoscaling advice. */
private int minimumMeasurementsPerNode(Duration scalingWindow) {
    // Assumes roughly one measurement per 5 minutes — confirm against the metrics sampling interval.
    // Require 80% of the expected sample count for the window, but always at least 1.
    long expectedMeasurements = scalingWindow.toMinutes() / 5;
    return (int) Math.max(1, Math.round(0.8 * expectedMeasurements));
}
class Autoscaler { /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final MetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice suggest(Cluster cluster, NodeList clusterNodes) { return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive()); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice autoscale(Cluster cluster, NodeList clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Advice.none("Autoscaling is not enabled"); return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive()); } private Advice autoscale(Cluster cluster, NodeList clusterNodes, Limits limits, boolean exclusive) { if ( ! 
stable(clusterNodes, nodeRepository)) return Advice.none("Cluster change in progress"); Duration scalingWindow = scalingWindow(clusterNodes.clusterSpec(), cluster); ClusterTimeseries clusterTimeseries = new ClusterTimeseries(nodeRepository.clock().instant().minus(scalingWindow), cluster, clusterNodes, metricsDb); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive()); int measurementsPerNode = clusterTimeseries.measurementsPerNode(); if (measurementsPerNode < minimumMeasurementsPerNode(scalingWindow)) return Advice.none("Collecting more data before making new scaling decisions" + " (has " + measurementsPerNode + " measurements per node but need " + minimumMeasurementsPerNode(scalingWindow) + ")"); int nodesMeasured = clusterTimeseries.nodesMeasured(); if (nodesMeasured != clusterNodes.size()) return Advice.none("Collecting more data before making new scaling decisions" + "(has measurements from " + nodesMeasured + " but need from " + clusterNodes.size() + ")"); double cpuLoad = clusterTimeseries.averageLoad(Resource.cpu); double memoryLoad = clusterTimeseries.averageLoad(Resource.memory); double diskLoad = clusterTimeseries.averageLoad(Resource.disk); var target = ResourceTarget.idealLoad(cpuLoad, memoryLoad, diskLoad, currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Advice.dontScale("No allocation changes are possible within configured limits"); if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale("Cluster is ideally scaled (within configured limits)"); if (scaledIn(scalingWindow, cluster)) return Advice.dontScale("Won't autoscale now: Less than " + scalingWindow + " since last rescaling"); if (isDownscaling(bestAllocation.get(), currentAllocation) && scaledIn(scalingWindow.multipliedBy(3), cluster)) return 
Advice.dontScale("Waiting " + scalingWindow.multipliedBy(3) + " since last rescaling before reducing resources"); return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources()); } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold; } /** Returns true if this reduces total resources in any dimension */ private boolean isDownscaling(AllocatableClusterResources target, AllocatableClusterResources current) { NodeResources targetTotal = target.toAdvertisedClusterResources().totalResources(); NodeResources currentTotal = current.toAdvertisedClusterResources().totalResources(); return ! targetTotal.justNumbers().satisfies(currentTotal.justNumbers()); } private boolean scaledIn(Duration delay, Cluster cluster) { return cluster.lastScalingEvent().map(event -> event.at()).orElse(Instant.MIN) .isAfter(nodeRepository.clock().instant().minus(delay)); } /** The duration of the window we need to consider to make a scaling decision. 
See also minimumMeasurementsPerNode */ private Duration scalingWindow(ClusterSpec clusterSpec, Cluster cluster) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(event.duration().get()); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative()) return Duration.ofHours(12); if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative()) return Duration.ofMinutes(5); return predictedDuration; } } static Duration maxScalingWindow() { return Duration.ofHours(48); } /** Returns the minimum measurements per node (average) we require to give autoscaling advice.*/ public static boolean stable(NodeList nodes, NodeRepository nodeRepository) { if (nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable())) return false; if (nodeRepository.getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).size() > 0) return false; return true; } public static class Advice { private final boolean present; private final Optional<ClusterResources> target; private final String reason; private Advice(Optional<ClusterResources> target, boolean present, String reason) { this.target = target; this.present = present; this.reason = Objects.requireNonNull(reason); } /** * Returns the autoscaling target that should be set by this advice. * This is empty if the advice is to keep the current allocation. */ public Optional<ClusterResources> target() { return target; } /** True if this does not provide any advice */ public boolean isEmpty() { return ! 
present; } /** True if this provides advice (which may be to keep the current allocation) */ public boolean isPresent() { return present; } /** The reason for this advice */ public String reason() { return reason; } private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); } private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); } private static Advice scaleTo(ClusterResources target) { return new Advice(Optional.of(target), true, "Scaling due to load changes"); } @Override public String toString() { return "autoscaling advice: " + (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None"); } } }
class Autoscaler { /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final MetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice suggest(Cluster cluster, NodeList clusterNodes) { return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive()); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice autoscale(Cluster cluster, NodeList clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Advice.none("Autoscaling is not enabled"); return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive()); } private Advice autoscale(Cluster cluster, NodeList clusterNodes, Limits limits, boolean exclusive) { if ( ! 
stable(clusterNodes, nodeRepository)) return Advice.none("Cluster change in progress"); Duration scalingWindow = scalingWindow(clusterNodes.clusterSpec(), cluster); ClusterTimeseries clusterTimeseries = new ClusterTimeseries(nodeRepository.clock().instant().minus(scalingWindow), cluster, clusterNodes, metricsDb); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive()); int measurementsPerNode = clusterTimeseries.measurementsPerNode(); if (measurementsPerNode < minimumMeasurementsPerNode(scalingWindow)) return Advice.none("Collecting more data before making new scaling decisions" + " (has " + measurementsPerNode + " measurements per node but need " + minimumMeasurementsPerNode(scalingWindow) + ")"); int nodesMeasured = clusterTimeseries.nodesMeasured(); if (nodesMeasured != clusterNodes.size()) return Advice.none("Collecting more data before making new scaling decisions" + "(has measurements from " + nodesMeasured + " but need from " + clusterNodes.size() + ")"); double cpuLoad = clusterTimeseries.averageLoad(Resource.cpu); double memoryLoad = clusterTimeseries.averageLoad(Resource.memory); double diskLoad = clusterTimeseries.averageLoad(Resource.disk); var target = ResourceTarget.idealLoad(cpuLoad, memoryLoad, diskLoad, currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Advice.dontScale("No allocation changes are possible within configured limits"); if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale("Cluster is ideally scaled (within configured limits)"); if (scaledIn(scalingWindow, cluster)) return Advice.dontScale("Won't autoscale now: Less than " + scalingWindow + " since last rescaling"); if (isDownscaling(bestAllocation.get(), currentAllocation) && scaledIn(scalingWindow.multipliedBy(3), cluster)) return 
Advice.dontScale("Waiting " + scalingWindow.multipliedBy(3) + " since last rescaling before reducing resources"); return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources()); } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold; } /** Returns true if this reduces total resources in any dimension */ private boolean isDownscaling(AllocatableClusterResources target, AllocatableClusterResources current) { NodeResources targetTotal = target.toAdvertisedClusterResources().totalResources(); NodeResources currentTotal = current.toAdvertisedClusterResources().totalResources(); return ! targetTotal.justNumbers().satisfies(currentTotal.justNumbers()); } private boolean scaledIn(Duration delay, Cluster cluster) { return cluster.lastScalingEvent().map(event -> event.at()).orElse(Instant.MIN) .isAfter(nodeRepository.clock().instant().minus(delay)); } /** The duration of the window we need to consider to make a scaling decision. 
See also minimumMeasurementsPerNode */ private Duration scalingWindow(ClusterSpec clusterSpec, Cluster cluster) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(event.duration().get()); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative()) return Duration.ofHours(12); if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative()) return Duration.ofMinutes(5); return predictedDuration; } } static Duration maxScalingWindow() { return Duration.ofHours(48); } /** Returns the minimum measurements per node (average) we require to give autoscaling advice.*/ public static boolean stable(NodeList nodes, NodeRepository nodeRepository) { if (nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable())) return false; if (nodeRepository.getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).size() > 0) return false; return true; } public static class Advice { private final boolean present; private final Optional<ClusterResources> target; private final String reason; private Advice(Optional<ClusterResources> target, boolean present, String reason) { this.target = target; this.present = present; this.reason = Objects.requireNonNull(reason); } /** * Returns the autoscaling target that should be set by this advice. * This is empty if the advice is to keep the current allocation. */ public Optional<ClusterResources> target() { return target; } /** True if this does not provide any advice */ public boolean isEmpty() { return ! 
present; } /** True if this provides advice (which may be to keep the current allocation) */ public boolean isPresent() { return present; } /** The reason for this advice */ public String reason() { return reason; } private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); } private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); } private static Advice scaleTo(ClusterResources target) { return new Advice(Optional.of(target), true, "Scaling due to load changes"); } @Override public String toString() { return "autoscaling advice: " + (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None"); } } }
Thanks. I'll fix it later.
/**
 * Returns the minimum number of measurements per node (on average) that we require
 * before giving autoscaling advice, given the window we are deciding over.
 * We expect one measurement every 5 minutes, and require 80% of those to be present,
 * but always at least one.
 */
private int minimumMeasurementsPerNode(Duration scalingWindow) {
    long expectedMeasurements = scalingWindow.toMinutes() / 5; // one sample expected per 5 minutes
    return (int) Math.max(1, Math.round(0.8 * expectedMeasurements));
}
/**
 * Returns the minimum number of measurements per node (on average) that we require
 * before giving autoscaling advice, given the window we are deciding over.
 * We expect one measurement every 5 minutes, and require 80% of those to be present,
 * but always at least one.
 */
private int minimumMeasurementsPerNode(Duration scalingWindow) {
    long expectedMeasurements = scalingWindow.toMinutes() / 5; // one sample expected per 5 minutes
    return (int) Math.max(1, Math.round(0.8 * expectedMeasurements));
}
/**
 * Makes autoscaling decisions for a cluster based on observed load over a scaling window.
 * Produces either no advice (insufficient data or cluster change in progress), advice to
 * keep the current allocation, or advice to scale to a concrete better allocation.
 */
class Autoscaler {

    /** What cost difference factor is worth a reallocation? */
    private static final double costDifferenceWorthReallocation = 0.1;

    /** What difference factor for a resource is worth a reallocation? */
    private static final double resourceDifferenceWorthReallocation = 0.1;

    private final MetricsDb metricsDb;
    private final NodeRepository nodeRepository;
    private final AllocationOptimizer allocationOptimizer;

    public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) {
        this.metricsDb = metricsDb;
        this.nodeRepository = nodeRepository;
        this.allocationOptimizer = new AllocationOptimizer(nodeRepository);
    }

    /**
     * Suggest a scaling of a cluster. This returns a better allocation (if found)
     * without taking min and max limits into account.
     *
     * @param clusterNodes the list of all the active nodes in a cluster
     * @return scaling advice for this cluster
     */
    public Advice suggest(Cluster cluster, NodeList clusterNodes) {
        return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive());
    }

    /**
     * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits.
     *
     * @param clusterNodes the list of all the active nodes in a cluster
     * @return scaling advice for this cluster
     */
    public Advice autoscale(Cluster cluster, NodeList clusterNodes) {
        if (cluster.minResources().equals(cluster.maxResources())) return Advice.none("Autoscaling is not enabled");
        return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive());
    }

    private Advice autoscale(Cluster cluster, NodeList clusterNodes, Limits limits, boolean exclusive) {
        if ( ! stable(clusterNodes, nodeRepository))
            return Advice.none("Cluster change in progress");

        Duration scalingWindow = scalingWindow(clusterNodes.clusterSpec(), cluster);
        ClusterTimeseries clusterTimeseries =
                new ClusterTimeseries(nodeRepository.clock().instant().minus(scalingWindow), cluster, clusterNodes, metricsDb);
        AllocatableClusterResources currentAllocation =
                new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive());

        int measurementsPerNode = clusterTimeseries.measurementsPerNode();
        if (measurementsPerNode < minimumMeasurementsPerNode(scalingWindow))
            return Advice.none("Collecting more data before making new scaling decisions" +
                               " (has " + measurementsPerNode + " measurements per node but need " +
                               minimumMeasurementsPerNode(scalingWindow) + ")");

        int nodesMeasured = clusterTimeseries.nodesMeasured();
        if (nodesMeasured != clusterNodes.size())
            // Fixed: message previously read "...decisions(has..." — missing the leading space
            return Advice.none("Collecting more data before making new scaling decisions" +
                               " (has measurements from " + nodesMeasured + " but need from " + clusterNodes.size() + ")");

        double cpuLoad    = clusterTimeseries.averageLoad(Resource.cpu);
        double memoryLoad = clusterTimeseries.averageLoad(Resource.memory);
        double diskLoad   = clusterTimeseries.averageLoad(Resource.disk);
        var target = ResourceTarget.idealLoad(cpuLoad, memoryLoad, diskLoad, currentAllocation);

        Optional<AllocatableClusterResources> bestAllocation =
                allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive);
        if (bestAllocation.isEmpty())
            return Advice.dontScale("No allocation changes are possible within configured limits");

        if (similar(bestAllocation.get(), currentAllocation))
            return Advice.dontScale("Cluster is ideally scaled (within configured limits)");
        if (scaledIn(scalingWindow, cluster))
            return Advice.dontScale("Won't autoscale now: Less than " + scalingWindow + " since last rescaling");
        // Downscaling waits 3x the window: releasing resources is riskier than adding them
        if (isDownscaling(bestAllocation.get(), currentAllocation) && scaledIn(scalingWindow.multipliedBy(3), cluster))
            return Advice.dontScale("Waiting " + scalingWindow.multipliedBy(3) +
                                    " since last rescaling before reducing resources");

        return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources());
    }

    /** Returns true if both total real resources and total cost are similar */
    private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) {
        return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) &&
               similar(a.realResources().vcpu() * a.nodes(),
                       b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) &&
               similar(a.realResources().memoryGb() * a.nodes(),
                       b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) &&
               similar(a.realResources().diskGb() * a.nodes(),
                       b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation);
    }

    /** True if r1 and r2 differ by less than threshold, relative to their average */
    private boolean similar(double r1, double r2, double threshold) {
        return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold;
    }

    /** Returns true if the target reduces total resources in any dimension compared to the current allocation */
    private boolean isDownscaling(AllocatableClusterResources target, AllocatableClusterResources current) {
        NodeResources targetTotal = target.toAdvertisedClusterResources().totalResources();
        NodeResources currentTotal = current.toAdvertisedClusterResources().totalResources();
        return ! targetTotal.justNumbers().satisfies(currentTotal.justNumbers());
    }

    /** Returns true if the cluster last rescaled less than the given delay ago */
    private boolean scaledIn(Duration delay, Cluster cluster) {
        return cluster.lastScalingEvent().map(event -> event.at()).orElse(Instant.MIN)
                      .isAfter(nodeRepository.clock().instant().minus(delay));
    }

    /**
     * The duration of the window we need to consider to make a scaling decision.
     * See also minimumMeasurementsPerNode.
     */
    private Duration scalingWindow(ClusterSpec clusterSpec, Cluster cluster) {
        int completedEventCount = 0;
        Duration totalDuration = Duration.ZERO;
        for (ScalingEvent event : cluster.scalingEvents()) {
            if (event.duration().isEmpty()) continue; // still in progress: cannot contribute to the average
            completedEventCount++;
            totalDuration = totalDuration.plus(event.duration().get());
        }

        if (completedEventCount == 0) { // no data: use defaults, longer for stateful clusters
            if (clusterSpec.isStateful()) return Duration.ofHours(12);
            return Duration.ofMinutes(10);
        }
        else {
            Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
            // Clamp to a lower bound: 12h for stateful clusters, 5 min otherwise
            if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative())
                return Duration.ofHours(12);
            if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative())
                return Duration.ofMinutes(5);
            return predictedDuration;
        }
    }

    static Duration maxScalingWindow() { return Duration.ofHours(48); }

    /**
     * Returns true if the cluster is in a stable state where autoscaling decisions can be made:
     * no nodes want to retire, are retired or removable, and no nodes are reserved for the application.
     * NOTE(review): assumes all given nodes are allocated — allocation().get() would throw otherwise.
     */
    public static boolean stable(NodeList nodes, NodeRepository nodeRepository) {
        if (nodes.stream().anyMatch(node -> node.status().wantToRetire() ||
                                            node.allocation().get().membership().retired() ||
                                            node.allocation().get().isRemovable()))
            return false;

        // A deployment is ongoing if nodes are reserved for this application
        if ( ! nodeRepository.getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).isEmpty())
            return false;

        return true;
    }

    /** Autoscaling advice: either none, keep the current allocation, or scale to a given target. */
    public static class Advice {

        private final boolean present;
        private final Optional<ClusterResources> target;
        private final String reason;

        private Advice(Optional<ClusterResources> target, boolean present, String reason) {
            this.target = target;
            this.present = present;
            this.reason = Objects.requireNonNull(reason);
        }

        /**
         * Returns the autoscaling target that should be set by this advice.
         * This is empty if the advice is to keep the current allocation.
         */
        public Optional<ClusterResources> target() { return target; }

        /** True if this does not provide any advice */
        public boolean isEmpty() { return ! present; }

        /** True if this provides advice (which may be to keep the current allocation) */
        public boolean isPresent() { return present; }

        /** The reason for this advice */
        public String reason() { return reason; }

        private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); }
        private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); }
        private static Advice scaleTo(ClusterResources target) {
            return new Advice(Optional.of(target), true, "Scaling due to load changes");
        }

        @Override
        public String toString() {
            return "autoscaling advice: " +
                   (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None");
        }

    }

}
/**
 * Makes autoscaling decisions for a cluster based on observed load over a scaling window.
 * Produces either no advice (insufficient data or cluster change in progress), advice to
 * keep the current allocation, or advice to scale to a concrete better allocation.
 */
class Autoscaler {

    /** What cost difference factor is worth a reallocation? */
    private static final double costDifferenceWorthReallocation = 0.1;

    /** What difference factor for a resource is worth a reallocation? */
    private static final double resourceDifferenceWorthReallocation = 0.1;

    private final MetricsDb metricsDb;
    private final NodeRepository nodeRepository;
    private final AllocationOptimizer allocationOptimizer;

    public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) {
        this.metricsDb = metricsDb;
        this.nodeRepository = nodeRepository;
        this.allocationOptimizer = new AllocationOptimizer(nodeRepository);
    }

    /**
     * Suggest a scaling of a cluster. This returns a better allocation (if found)
     * without taking min and max limits into account.
     *
     * @param clusterNodes the list of all the active nodes in a cluster
     * @return scaling advice for this cluster
     */
    public Advice suggest(Cluster cluster, NodeList clusterNodes) {
        return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive());
    }

    /**
     * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits.
     *
     * @param clusterNodes the list of all the active nodes in a cluster
     * @return scaling advice for this cluster
     */
    public Advice autoscale(Cluster cluster, NodeList clusterNodes) {
        if (cluster.minResources().equals(cluster.maxResources())) return Advice.none("Autoscaling is not enabled");
        return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive());
    }

    private Advice autoscale(Cluster cluster, NodeList clusterNodes, Limits limits, boolean exclusive) {
        if ( ! stable(clusterNodes, nodeRepository))
            return Advice.none("Cluster change in progress");

        Duration scalingWindow = scalingWindow(clusterNodes.clusterSpec(), cluster);
        ClusterTimeseries clusterTimeseries =
                new ClusterTimeseries(nodeRepository.clock().instant().minus(scalingWindow), cluster, clusterNodes, metricsDb);
        AllocatableClusterResources currentAllocation =
                new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive());

        int measurementsPerNode = clusterTimeseries.measurementsPerNode();
        if (measurementsPerNode < minimumMeasurementsPerNode(scalingWindow))
            return Advice.none("Collecting more data before making new scaling decisions" +
                               " (has " + measurementsPerNode + " measurements per node but need " +
                               minimumMeasurementsPerNode(scalingWindow) + ")");

        int nodesMeasured = clusterTimeseries.nodesMeasured();
        if (nodesMeasured != clusterNodes.size())
            // Fixed: message previously read "...decisions(has..." — missing the leading space
            return Advice.none("Collecting more data before making new scaling decisions" +
                               " (has measurements from " + nodesMeasured + " but need from " + clusterNodes.size() + ")");

        double cpuLoad    = clusterTimeseries.averageLoad(Resource.cpu);
        double memoryLoad = clusterTimeseries.averageLoad(Resource.memory);
        double diskLoad   = clusterTimeseries.averageLoad(Resource.disk);
        var target = ResourceTarget.idealLoad(cpuLoad, memoryLoad, diskLoad, currentAllocation);

        Optional<AllocatableClusterResources> bestAllocation =
                allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive);
        if (bestAllocation.isEmpty())
            return Advice.dontScale("No allocation changes are possible within configured limits");

        if (similar(bestAllocation.get(), currentAllocation))
            return Advice.dontScale("Cluster is ideally scaled (within configured limits)");
        if (scaledIn(scalingWindow, cluster))
            return Advice.dontScale("Won't autoscale now: Less than " + scalingWindow + " since last rescaling");
        // Downscaling waits 3x the window: releasing resources is riskier than adding them
        if (isDownscaling(bestAllocation.get(), currentAllocation) && scaledIn(scalingWindow.multipliedBy(3), cluster))
            return Advice.dontScale("Waiting " + scalingWindow.multipliedBy(3) +
                                    " since last rescaling before reducing resources");

        return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources());
    }

    /** Returns true if both total real resources and total cost are similar */
    private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) {
        return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) &&
               similar(a.realResources().vcpu() * a.nodes(),
                       b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) &&
               similar(a.realResources().memoryGb() * a.nodes(),
                       b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) &&
               similar(a.realResources().diskGb() * a.nodes(),
                       b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation);
    }

    /** True if r1 and r2 differ by less than threshold, relative to their average */
    private boolean similar(double r1, double r2, double threshold) {
        return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold;
    }

    /** Returns true if the target reduces total resources in any dimension compared to the current allocation */
    private boolean isDownscaling(AllocatableClusterResources target, AllocatableClusterResources current) {
        NodeResources targetTotal = target.toAdvertisedClusterResources().totalResources();
        NodeResources currentTotal = current.toAdvertisedClusterResources().totalResources();
        return ! targetTotal.justNumbers().satisfies(currentTotal.justNumbers());
    }

    /** Returns true if the cluster last rescaled less than the given delay ago */
    private boolean scaledIn(Duration delay, Cluster cluster) {
        return cluster.lastScalingEvent().map(event -> event.at()).orElse(Instant.MIN)
                      .isAfter(nodeRepository.clock().instant().minus(delay));
    }

    /**
     * The duration of the window we need to consider to make a scaling decision.
     * See also minimumMeasurementsPerNode.
     */
    private Duration scalingWindow(ClusterSpec clusterSpec, Cluster cluster) {
        int completedEventCount = 0;
        Duration totalDuration = Duration.ZERO;
        for (ScalingEvent event : cluster.scalingEvents()) {
            if (event.duration().isEmpty()) continue; // still in progress: cannot contribute to the average
            completedEventCount++;
            totalDuration = totalDuration.plus(event.duration().get());
        }

        if (completedEventCount == 0) { // no data: use defaults, longer for stateful clusters
            if (clusterSpec.isStateful()) return Duration.ofHours(12);
            return Duration.ofMinutes(10);
        }
        else {
            Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
            // Clamp to a lower bound: 12h for stateful clusters, 5 min otherwise
            if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative())
                return Duration.ofHours(12);
            if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative())
                return Duration.ofMinutes(5);
            return predictedDuration;
        }
    }

    static Duration maxScalingWindow() { return Duration.ofHours(48); }

    /**
     * Returns true if the cluster is in a stable state where autoscaling decisions can be made:
     * no nodes want to retire, are retired or removable, and no nodes are reserved for the application.
     * NOTE(review): assumes all given nodes are allocated — allocation().get() would throw otherwise.
     */
    public static boolean stable(NodeList nodes, NodeRepository nodeRepository) {
        if (nodes.stream().anyMatch(node -> node.status().wantToRetire() ||
                                            node.allocation().get().membership().retired() ||
                                            node.allocation().get().isRemovable()))
            return false;

        // A deployment is ongoing if nodes are reserved for this application
        if ( ! nodeRepository.getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).isEmpty())
            return false;

        return true;
    }

    /** Autoscaling advice: either none, keep the current allocation, or scale to a given target. */
    public static class Advice {

        private final boolean present;
        private final Optional<ClusterResources> target;
        private final String reason;

        private Advice(Optional<ClusterResources> target, boolean present, String reason) {
            this.target = target;
            this.present = present;
            this.reason = Objects.requireNonNull(reason);
        }

        /**
         * Returns the autoscaling target that should be set by this advice.
         * This is empty if the advice is to keep the current allocation.
         */
        public Optional<ClusterResources> target() { return target; }

        /** True if this does not provide any advice */
        public boolean isEmpty() { return ! present; }

        /** True if this provides advice (which may be to keep the current allocation) */
        public boolean isPresent() { return present; }

        /** The reason for this advice */
        public String reason() { return reason; }

        private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); }
        private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); }
        private static Advice scaleTo(ClusterResources target) {
            return new Advice(Optional.of(target), true, "Scaling due to load changes");
        }

        @Override
        public String toString() {
            return "autoscaling advice: " +
                   (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None");
        }

    }

}
I always have to think for a second when I see `Math.max` used to implement a minimum bound, so sometimes I try to do without it.
/**
 * Returns the minimum number of measurements per node (on average) that we require
 * before giving autoscaling advice, given the window we are deciding over.
 * We expect one measurement every 5 minutes, and require 80% of those to be present,
 * but always at least one.
 */
private int minimumMeasurementsPerNode(Duration scalingWindow) {
    long expectedMeasurements = scalingWindow.toMinutes() / 5; // one sample expected per 5 minutes
    return (int) Math.max(1, Math.round(0.8 * expectedMeasurements));
}
return (int)minimumMeasurements;
/**
 * Returns the minimum number of measurements per node (on average) that we require
 * before giving autoscaling advice, given the window we are deciding over.
 * We expect one measurement every 5 minutes, and require 80% of those to be present,
 * but always at least one.
 */
private int minimumMeasurementsPerNode(Duration scalingWindow) {
    long expectedMeasurements = scalingWindow.toMinutes() / 5; // one sample expected per 5 minutes
    return (int) Math.max(1, Math.round(0.8 * expectedMeasurements));
}
/**
 * Makes autoscaling decisions for a cluster based on observed load over a scaling window.
 * Produces either no advice (insufficient data or cluster change in progress), advice to
 * keep the current allocation, or advice to scale to a concrete better allocation.
 */
class Autoscaler {

    /** What cost difference factor is worth a reallocation? */
    private static final double costDifferenceWorthReallocation = 0.1;

    /** What difference factor for a resource is worth a reallocation? */
    private static final double resourceDifferenceWorthReallocation = 0.1;

    private final MetricsDb metricsDb;
    private final NodeRepository nodeRepository;
    private final AllocationOptimizer allocationOptimizer;

    public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) {
        this.metricsDb = metricsDb;
        this.nodeRepository = nodeRepository;
        this.allocationOptimizer = new AllocationOptimizer(nodeRepository);
    }

    /**
     * Suggest a scaling of a cluster. This returns a better allocation (if found)
     * without taking min and max limits into account.
     *
     * @param clusterNodes the list of all the active nodes in a cluster
     * @return scaling advice for this cluster
     */
    public Advice suggest(Cluster cluster, NodeList clusterNodes) {
        return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive());
    }

    /**
     * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits.
     *
     * @param clusterNodes the list of all the active nodes in a cluster
     * @return scaling advice for this cluster
     */
    public Advice autoscale(Cluster cluster, NodeList clusterNodes) {
        if (cluster.minResources().equals(cluster.maxResources())) return Advice.none("Autoscaling is not enabled");
        return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive());
    }

    private Advice autoscale(Cluster cluster, NodeList clusterNodes, Limits limits, boolean exclusive) {
        if ( ! stable(clusterNodes, nodeRepository))
            return Advice.none("Cluster change in progress");

        Duration scalingWindow = scalingWindow(clusterNodes.clusterSpec(), cluster);
        ClusterTimeseries clusterTimeseries =
                new ClusterTimeseries(nodeRepository.clock().instant().minus(scalingWindow), cluster, clusterNodes, metricsDb);
        AllocatableClusterResources currentAllocation =
                new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive());

        int measurementsPerNode = clusterTimeseries.measurementsPerNode();
        if (measurementsPerNode < minimumMeasurementsPerNode(scalingWindow))
            return Advice.none("Collecting more data before making new scaling decisions" +
                               " (has " + measurementsPerNode + " measurements per node but need " +
                               minimumMeasurementsPerNode(scalingWindow) + ")");

        int nodesMeasured = clusterTimeseries.nodesMeasured();
        if (nodesMeasured != clusterNodes.size())
            // Fixed: message previously read "...decisions(has..." — missing the leading space
            return Advice.none("Collecting more data before making new scaling decisions" +
                               " (has measurements from " + nodesMeasured + " but need from " + clusterNodes.size() + ")");

        double cpuLoad    = clusterTimeseries.averageLoad(Resource.cpu);
        double memoryLoad = clusterTimeseries.averageLoad(Resource.memory);
        double diskLoad   = clusterTimeseries.averageLoad(Resource.disk);
        var target = ResourceTarget.idealLoad(cpuLoad, memoryLoad, diskLoad, currentAllocation);

        Optional<AllocatableClusterResources> bestAllocation =
                allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive);
        if (bestAllocation.isEmpty())
            return Advice.dontScale("No allocation changes are possible within configured limits");

        if (similar(bestAllocation.get(), currentAllocation))
            return Advice.dontScale("Cluster is ideally scaled (within configured limits)");
        if (scaledIn(scalingWindow, cluster))
            return Advice.dontScale("Won't autoscale now: Less than " + scalingWindow + " since last rescaling");
        // Downscaling waits 3x the window: releasing resources is riskier than adding them
        if (isDownscaling(bestAllocation.get(), currentAllocation) && scaledIn(scalingWindow.multipliedBy(3), cluster))
            return Advice.dontScale("Waiting " + scalingWindow.multipliedBy(3) +
                                    " since last rescaling before reducing resources");

        return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources());
    }

    /** Returns true if both total real resources and total cost are similar */
    private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) {
        return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) &&
               similar(a.realResources().vcpu() * a.nodes(),
                       b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) &&
               similar(a.realResources().memoryGb() * a.nodes(),
                       b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) &&
               similar(a.realResources().diskGb() * a.nodes(),
                       b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation);
    }

    /** True if r1 and r2 differ by less than threshold, relative to their average */
    private boolean similar(double r1, double r2, double threshold) {
        return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold;
    }

    /** Returns true if the target reduces total resources in any dimension compared to the current allocation */
    private boolean isDownscaling(AllocatableClusterResources target, AllocatableClusterResources current) {
        NodeResources targetTotal = target.toAdvertisedClusterResources().totalResources();
        NodeResources currentTotal = current.toAdvertisedClusterResources().totalResources();
        return ! targetTotal.justNumbers().satisfies(currentTotal.justNumbers());
    }

    /** Returns true if the cluster last rescaled less than the given delay ago */
    private boolean scaledIn(Duration delay, Cluster cluster) {
        return cluster.lastScalingEvent().map(event -> event.at()).orElse(Instant.MIN)
                      .isAfter(nodeRepository.clock().instant().minus(delay));
    }

    /**
     * The duration of the window we need to consider to make a scaling decision.
     * See also minimumMeasurementsPerNode.
     */
    private Duration scalingWindow(ClusterSpec clusterSpec, Cluster cluster) {
        int completedEventCount = 0;
        Duration totalDuration = Duration.ZERO;
        for (ScalingEvent event : cluster.scalingEvents()) {
            if (event.duration().isEmpty()) continue; // still in progress: cannot contribute to the average
            completedEventCount++;
            totalDuration = totalDuration.plus(event.duration().get());
        }

        if (completedEventCount == 0) { // no data: use defaults, longer for stateful clusters
            if (clusterSpec.isStateful()) return Duration.ofHours(12);
            return Duration.ofMinutes(10);
        }
        else {
            Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
            // Clamp to a lower bound: 12h for stateful clusters, 5 min otherwise
            if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative())
                return Duration.ofHours(12);
            if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative())
                return Duration.ofMinutes(5);
            return predictedDuration;
        }
    }

    static Duration maxScalingWindow() { return Duration.ofHours(48); }

    /**
     * Returns true if the cluster is in a stable state where autoscaling decisions can be made:
     * no nodes want to retire, are retired or removable, and no nodes are reserved for the application.
     * NOTE(review): assumes all given nodes are allocated — allocation().get() would throw otherwise.
     */
    public static boolean stable(NodeList nodes, NodeRepository nodeRepository) {
        if (nodes.stream().anyMatch(node -> node.status().wantToRetire() ||
                                            node.allocation().get().membership().retired() ||
                                            node.allocation().get().isRemovable()))
            return false;

        // A deployment is ongoing if nodes are reserved for this application
        if ( ! nodeRepository.getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).isEmpty())
            return false;

        return true;
    }

    /** Autoscaling advice: either none, keep the current allocation, or scale to a given target. */
    public static class Advice {

        private final boolean present;
        private final Optional<ClusterResources> target;
        private final String reason;

        private Advice(Optional<ClusterResources> target, boolean present, String reason) {
            this.target = target;
            this.present = present;
            this.reason = Objects.requireNonNull(reason);
        }

        /**
         * Returns the autoscaling target that should be set by this advice.
         * This is empty if the advice is to keep the current allocation.
         */
        public Optional<ClusterResources> target() { return target; }

        /** True if this does not provide any advice */
        public boolean isEmpty() { return ! present; }

        /** True if this provides advice (which may be to keep the current allocation) */
        public boolean isPresent() { return present; }

        /** The reason for this advice */
        public String reason() { return reason; }

        private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); }
        private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); }
        private static Advice scaleTo(ClusterResources target) {
            return new Advice(Optional.of(target), true, "Scaling due to load changes");
        }

        @Override
        public String toString() {
            return "autoscaling advice: " +
                   (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None");
        }

    }

}
/**
 * Makes autoscaling decisions for a cluster based on observed load over a scaling window.
 * Produces either no advice (insufficient data or cluster change in progress), advice to
 * keep the current allocation, or advice to scale to a concrete better allocation.
 */
class Autoscaler {

    /** What cost difference factor is worth a reallocation? */
    private static final double costDifferenceWorthReallocation = 0.1;

    /** What difference factor for a resource is worth a reallocation? */
    private static final double resourceDifferenceWorthReallocation = 0.1;

    private final MetricsDb metricsDb;
    private final NodeRepository nodeRepository;
    private final AllocationOptimizer allocationOptimizer;

    public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) {
        this.metricsDb = metricsDb;
        this.nodeRepository = nodeRepository;
        this.allocationOptimizer = new AllocationOptimizer(nodeRepository);
    }

    /**
     * Suggest a scaling of a cluster. This returns a better allocation (if found)
     * without taking min and max limits into account.
     *
     * @param clusterNodes the list of all the active nodes in a cluster
     * @return scaling advice for this cluster
     */
    public Advice suggest(Cluster cluster, NodeList clusterNodes) {
        return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive());
    }

    /**
     * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits.
     *
     * @param clusterNodes the list of all the active nodes in a cluster
     * @return scaling advice for this cluster
     */
    public Advice autoscale(Cluster cluster, NodeList clusterNodes) {
        if (cluster.minResources().equals(cluster.maxResources())) return Advice.none("Autoscaling is not enabled");
        return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive());
    }

    private Advice autoscale(Cluster cluster, NodeList clusterNodes, Limits limits, boolean exclusive) {
        if ( ! stable(clusterNodes, nodeRepository))
            return Advice.none("Cluster change in progress");

        Duration scalingWindow = scalingWindow(clusterNodes.clusterSpec(), cluster);
        ClusterTimeseries clusterTimeseries =
                new ClusterTimeseries(nodeRepository.clock().instant().minus(scalingWindow), cluster, clusterNodes, metricsDb);
        AllocatableClusterResources currentAllocation =
                new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive());

        int measurementsPerNode = clusterTimeseries.measurementsPerNode();
        if (measurementsPerNode < minimumMeasurementsPerNode(scalingWindow))
            return Advice.none("Collecting more data before making new scaling decisions" +
                               " (has " + measurementsPerNode + " measurements per node but need " +
                               minimumMeasurementsPerNode(scalingWindow) + ")");

        int nodesMeasured = clusterTimeseries.nodesMeasured();
        if (nodesMeasured != clusterNodes.size())
            // Fixed: message previously read "...decisions(has..." — missing the leading space
            return Advice.none("Collecting more data before making new scaling decisions" +
                               " (has measurements from " + nodesMeasured + " but need from " + clusterNodes.size() + ")");

        double cpuLoad    = clusterTimeseries.averageLoad(Resource.cpu);
        double memoryLoad = clusterTimeseries.averageLoad(Resource.memory);
        double diskLoad   = clusterTimeseries.averageLoad(Resource.disk);
        var target = ResourceTarget.idealLoad(cpuLoad, memoryLoad, diskLoad, currentAllocation);

        Optional<AllocatableClusterResources> bestAllocation =
                allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive);
        if (bestAllocation.isEmpty())
            return Advice.dontScale("No allocation changes are possible within configured limits");

        if (similar(bestAllocation.get(), currentAllocation))
            return Advice.dontScale("Cluster is ideally scaled (within configured limits)");
        if (scaledIn(scalingWindow, cluster))
            return Advice.dontScale("Won't autoscale now: Less than " + scalingWindow + " since last rescaling");
        // Downscaling waits 3x the window: releasing resources is riskier than adding them
        if (isDownscaling(bestAllocation.get(), currentAllocation) && scaledIn(scalingWindow.multipliedBy(3), cluster))
            return Advice.dontScale("Waiting " + scalingWindow.multipliedBy(3) +
                                    " since last rescaling before reducing resources");

        return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources());
    }

    /** Returns true if both total real resources and total cost are similar */
    private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) {
        return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) &&
               similar(a.realResources().vcpu() * a.nodes(),
                       b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) &&
               similar(a.realResources().memoryGb() * a.nodes(),
                       b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) &&
               similar(a.realResources().diskGb() * a.nodes(),
                       b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation);
    }

    /** True if r1 and r2 differ by less than threshold, relative to their average */
    private boolean similar(double r1, double r2, double threshold) {
        return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold;
    }

    /** Returns true if the target reduces total resources in any dimension compared to the current allocation */
    private boolean isDownscaling(AllocatableClusterResources target, AllocatableClusterResources current) {
        NodeResources targetTotal = target.toAdvertisedClusterResources().totalResources();
        NodeResources currentTotal = current.toAdvertisedClusterResources().totalResources();
        return ! targetTotal.justNumbers().satisfies(currentTotal.justNumbers());
    }

    /** Returns true if the cluster last rescaled less than the given delay ago */
    private boolean scaledIn(Duration delay, Cluster cluster) {
        return cluster.lastScalingEvent().map(event -> event.at()).orElse(Instant.MIN)
                      .isAfter(nodeRepository.clock().instant().minus(delay));
    }

    /**
     * The duration of the window we need to consider to make a scaling decision.
     * See also minimumMeasurementsPerNode.
     */
    private Duration scalingWindow(ClusterSpec clusterSpec, Cluster cluster) {
        int completedEventCount = 0;
        Duration totalDuration = Duration.ZERO;
        for (ScalingEvent event : cluster.scalingEvents()) {
            if (event.duration().isEmpty()) continue; // still in progress: cannot contribute to the average
            completedEventCount++;
            totalDuration = totalDuration.plus(event.duration().get());
        }

        if (completedEventCount == 0) { // no data: use defaults, longer for stateful clusters
            if (clusterSpec.isStateful()) return Duration.ofHours(12);
            return Duration.ofMinutes(10);
        }
        else {
            Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
            // Clamp to a lower bound: 12h for stateful clusters, 5 min otherwise
            if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative())
                return Duration.ofHours(12);
            if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative())
                return Duration.ofMinutes(5);
            return predictedDuration;
        }
    }

    static Duration maxScalingWindow() { return Duration.ofHours(48); }

    /**
     * Returns true if the cluster is in a stable state where autoscaling decisions can be made:
     * no nodes want to retire, are retired or removable, and no nodes are reserved for the application.
     * NOTE(review): assumes all given nodes are allocated — allocation().get() would throw otherwise.
     */
    public static boolean stable(NodeList nodes, NodeRepository nodeRepository) {
        if (nodes.stream().anyMatch(node -> node.status().wantToRetire() ||
                                            node.allocation().get().membership().retired() ||
                                            node.allocation().get().isRemovable()))
            return false;

        // A deployment is ongoing if nodes are reserved for this application
        if ( ! nodeRepository.getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).isEmpty())
            return false;

        return true;
    }

    /** Autoscaling advice: either none, keep the current allocation, or scale to a given target. */
    public static class Advice {

        private final boolean present;
        private final Optional<ClusterResources> target;
        private final String reason;

        private Advice(Optional<ClusterResources> target, boolean present, String reason) {
            this.target = target;
            this.present = present;
            this.reason = Objects.requireNonNull(reason);
        }

        /**
         * Returns the autoscaling target that should be set by this advice.
         * This is empty if the advice is to keep the current allocation.
         */
        public Optional<ClusterResources> target() { return target; }

        /** True if this does not provide any advice */
        public boolean isEmpty() { return ! present; }

        /** True if this provides advice (which may be to keep the current allocation) */
        public boolean isPresent() { return present; }

        /** The reason for this advice */
        public String reason() { return reason; }

        private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); }
        private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); }
        private static Advice scaleTo(ClusterResources target) {
            return new Advice(Optional.of(target), true, "Scaling due to load changes");
        }

        @Override
        public String toString() {
            return "autoscaling advice: " +
                   (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None");
        }

    }

}
This feels sufficiently magical that I think it'd be good to encapsulate it in a `createJsonResponseWithMessage`-ish method and only do the wizardry once there
/**
 * Handles DELETE requests for an application.
 * <p>
 * A request against the reindexing resource disables reindexing for the
 * application and reports that in a JSON message; any other DELETE attempts
 * to remove the application itself, answering 404 if it does not exist.
 */
public HttpResponse handleDELETE(HttpRequest request) {
    ApplicationId applicationId = getApplicationIdFromRequest(request);

    if (isReindexingRequest(request)) {
        applicationRepository.modifyReindexing(applicationId, reindexing -> reindexing.enabled(false));
        // Anonymous JSONResponse with an instance initializer ("double-brace") setting the message field.
        return new JSONResponse(Response.Status.OK) { { object.setString("message", "Reindexing disabled"); } };
    }

    // Delete the application; report 404 when there was nothing to delete.
    return applicationRepository.delete(applicationId)
           ? new DeleteApplicationResponse(Response.Status.OK, applicationId)
           : HttpErrorResponse.notFoundError("Unable to delete " + applicationId.toFullString() + ": Not found");
}
return new JSONResponse(Response.Status.OK) { { object.setString("message", "Reindexing disabled"); } };
/**
 * Handles DELETE requests for an application.
 * <p>
 * When the request targets the reindexing resource, reindexing is switched
 * off and a confirmation message is returned; otherwise the application
 * itself is deleted, with a 404 response if it was not found.
 */
public HttpResponse handleDELETE(HttpRequest request) {
    ApplicationId applicationId = getApplicationIdFromRequest(request);

    if (isReindexingRequest(request)) {
        applicationRepository.modifyReindexing(applicationId, reindexing -> reindexing.enabled(false));
        return createMessageResponse("Reindexing disabled");
    }

    boolean deleted = applicationRepository.delete(applicationId);
    if ( ! deleted)
        return HttpErrorResponse.notFoundError("Unable to delete " + applicationId.toFullString() + ": Not found");
    return new DeleteApplicationResponse(Response.Status.OK, applicationId);
}
class ApplicationHandler extends HttpHandler { private static final List<UriPattern> URI_PATTERNS = Stream.of( "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: .map(UriPattern::new) .collect(toList()); private final Zone zone; private final ApplicationRepository applicationRepository; @Inject public ApplicationHandler(HttpHandler.Context ctx, Zone zone, ApplicationRepository applicationRepository) { super(ctx); this.zone = zone; this.applicationRepository = applicationRepository; } @Override @Override public HttpResponse handleGET(HttpRequest request) { ApplicationId applicationId = getApplicationIdFromRequest(request); Duration timeout = HttpHandler.getRequestTimeout(request, Duration.ofSeconds(5)); if (isServiceConvergeRequest(request)) { String hostAndPort = getHostNameFromRequest(request); return applicationRepository.checkServiceForConfigConvergence(applicationId, hostAndPort, request.getUri(), timeout, getVespaVersionFromRequest(request)); } if (isClusterControllerStatusRequest(request)) { String hostName = getHostNameFromRequest(request); String pathSuffix = getPathSuffix(request); return applicationRepository.clusterControllerStatusPage(applicationId, hostName, pathSuffix); } if (isReindexingRequest(request)) { return getReindexingStatus(applicationId); } if (isContentRequest(request)) { long sessionId = applicationRepository.getSessionIdForApplication(applicationId); String contentPath = getBindingMatch(request).group(7); ApplicationFile applicationFile = applicationRepository.getApplicationFileFromSession(applicationId.tenant(), sessionId, contentPath, ContentRequest.getApplicationFileMode(request.getMethod())); ApplicationContentRequest contentRequest = new ApplicationContentRequest(request, sessionId, applicationId, zone, contentPath, applicationFile); return new ContentHandler().get(contentRequest); } if (isServiceConvergeListRequest(request)) { return 
applicationRepository.servicesToCheckForConfigConvergence(applicationId, request.getUri(), timeout, getVespaVersionFromRequest(request)); } if (isFiledistributionStatusRequest(request)) { return applicationRepository.filedistributionStatus(applicationId, timeout); } if (isLogRequest(request)) { Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname")); String apiParams = Optional.ofNullable(request.getUri().getQuery()).map(q -> "?" + q).orElse(""); return applicationRepository.getLogs(applicationId, hostname, apiParams); } if (isProtonMetricsRequest(request)) { return applicationRepository.getProtonMetrics(applicationId); } if (isDeploymentMetricsRequest(request)) { return applicationRepository.getDeploymentMetrics(applicationId); } if (isIsSuspendedRequest(request)) { return new ApplicationSuspendedResponse(applicationRepository.isSuspended(applicationId)); } if (isTesterRequest(request)) { String testerCommand = getTesterCommandFromRequest(request); switch (testerCommand) { case "status": return applicationRepository.getTesterStatus(applicationId); case "log": Long after = Long.valueOf(request.getProperty("after")); return applicationRepository.getTesterLog(applicationId, after); case "ready": return applicationRepository.isTesterReady(applicationId); case "report": return applicationRepository.getTestReport(applicationId); default: throw new IllegalArgumentException("Unknown tester command in request " + request.getUri().toString()); } } if (isQuotaUsageRequest(request)) { var quotaUsageRate = applicationRepository.getQuotaUsageRate(applicationId); return new QuotaUsageResponse(quotaUsageRate); } return getApplicationResponse(applicationId); } GetApplicationResponse getApplicationResponse(ApplicationId applicationId) { return new GetApplicationResponse(Response.Status.OK, applicationRepository.getApplicationGeneration(applicationId), applicationRepository.getAllVersions(applicationId), 
applicationRepository.getApplicationPackageReference(applicationId)); } @Override public HttpResponse handlePOST(HttpRequest request) { ApplicationId applicationId = getApplicationIdFromRequest(request); if (isRestartRequest(request)) return restart(request, applicationId); if (isTesterStartTestsRequest(request)) { byte[] data; try { data = IOUtils.readBytes(request.getData(), 1024 * 1000); } catch (IOException e) { throw new IllegalArgumentException("Could not read data in request " + request); } return applicationRepository.startTests(applicationId, getSuiteFromRequest(request), data); } if (isReindexRequest(request)) { return triggerReindexing(request, applicationId); } if (isReindexingRequest(request)) { applicationRepository.modifyReindexing(applicationId, reindexing -> reindexing.enabled(true)); return new JSONResponse(Response.Status.OK) { { object.setString("message", "Reindexing enabled"); } }; } throw new NotFoundException("Illegal POST request '" + request.getUri() + "'"); } private HttpResponse triggerReindexing(HttpRequest request, ApplicationId applicationId) { Set<String> clusters = StringUtilities.split(request.getProperty("clusterId")); Set<String> types = StringUtilities.split(request.getProperty("documentType")); Instant now = applicationRepository.clock().instant(); applicationRepository.modifyReindexing(applicationId, reindexing -> { if (clusters.isEmpty()) reindexing = reindexing.withReady(now); else for (String cluster : clusters) if (types.isEmpty()) reindexing = reindexing.withReady(cluster, now); else for (String type : types) reindexing = reindexing.withReady(cluster, type, now); return reindexing; }); String message = "Reindexing " + (clusters.isEmpty() ? "" : (types.isEmpty() ? 
"" : "document types " + String.join(", ", types) + " in ") + "clusters " + String.join(", ", clusters) + " of ") + "application " + applicationId; return new JSONResponse(Response.Status.OK) { { object.setString("message", message); } }; } private HttpResponse getReindexingStatus(ApplicationId applicationId) { Tenant tenant = applicationRepository.getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant().value() + "' not found"); return new ReindexingResponse(tenant.getApplicationRepo().database() .readReindexingStatus(applicationId) .orElseThrow(() -> new NotFoundException("Reindexing status not found for " + applicationId)), applicationRepository.getClusterReindexingStatus(applicationId)); } private HttpResponse restart(HttpRequest request, ApplicationId applicationId) { if (getBindingMatch(request).groupCount() != 7) throw new NotFoundException("Illegal POST restart request '" + request.getUri() + "': Must have 6 arguments but had " + (getBindingMatch(request).groupCount() - 1)); applicationRepository.restart(applicationId, hostFilterFrom(request)); return new JSONResponse(Response.Status.OK); } private HostFilter hostFilterFrom(HttpRequest request) { return HostFilter.from(request.getProperty("hostname"), request.getProperty("flavor"), request.getProperty("clusterType"), request.getProperty("clusterId")); } private static BindingMatch<?> getBindingMatch(HttpRequest request) { return URI_PATTERNS.stream() .map(pattern -> { UriPattern.Match match = pattern.match(request.getUri()); if (match == null) return null; return new BindingMatch<>(match, new Object(), pattern); }) .filter(Objects::nonNull) .findFirst() .orElseThrow(() -> new IllegalArgumentException("Illegal url for config request: " + request.getUri())); } private static boolean isRestartRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/restart"); } private static boolean 
isReindexRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/reindex"); } private static boolean isReindexingRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/reindexing"); } private static boolean isIsSuspendedRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/suspended"); } private static boolean isProtonMetricsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().endsWith("/metrics/proton"); } private static boolean isDeploymentMetricsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().endsWith("/metrics/deployment"); } private static boolean isLogRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/logs"); } private static boolean isServiceConvergeListRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/serviceconverge"); } private static boolean isServiceConvergeRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().contains("/serviceconverge/"); } private static boolean isClusterControllerStatusRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 9 && request.getUri().getPath().contains("/clustercontroller/"); } private static boolean isContentRequest(HttpRequest request) { return getBindingMatch(request).groupCount() > 7 && request.getUri().getPath().contains("/content/"); } private static boolean isFiledistributionStatusRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().contains("/filedistributionstatus"); } private static boolean isTesterRequest(HttpRequest 
request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().contains("/tester"); } private static boolean isTesterStartTestsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 9 && request.getUri().getPath().contains("/tester/run/"); } private static boolean isQuotaUsageRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/quota"); } private static String getHostNameFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(7); } private static String getTesterCommandFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(7); } private static String getSuiteFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(8); } private static String getPathSuffix(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(8); } private static ApplicationId getApplicationIdFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); if (bm.groupCount() > 4) return createFromRequestFullAppId(bm); return createFromRequestSimpleAppId(bm); } private static ApplicationId createFromRequestSimpleAppId(BindingMatch<?> bm) { TenantName tenant = TenantName.from(bm.group(2)); ApplicationName application = ApplicationName.from(bm.group(3)); return new ApplicationId.Builder().tenant(tenant).applicationName(application).build(); } private static ApplicationId createFromRequestFullAppId(BindingMatch<?> bm) { String tenant = bm.group(2); String application = bm.group(3); String instance = bm.group(6); return new ApplicationId.Builder() .tenant(tenant) .applicationName(application).instanceName(instance) .build(); } private static Optional<Version> getVespaVersionFromRequest(HttpRequest request) { String vespaVersion = request.getProperty("vespaVersion"); return (vespaVersion == null || vespaVersion.isEmpty()) ? 
Optional.empty() : Optional.of(Version.fromString(vespaVersion)); } private static class DeleteApplicationResponse extends JSONResponse { DeleteApplicationResponse(int status, ApplicationId applicationId) { super(status); object.setString("message", "Application '" + applicationId + "' deleted"); } } private static class GetApplicationResponse extends JSONResponse { GetApplicationResponse(int status, long generation, List<Version> modelVersions, Optional<String> applicationPackageReference) { super(status); object.setLong("generation", generation); object.setString("applicationPackageFileReference", applicationPackageReference.orElse("")); Cursor modelVersionArray = object.setArray("modelVersions"); modelVersions.forEach(version -> modelVersionArray.addString(version.toFullString())); } } private static class ApplicationSuspendedResponse extends JSONResponse { ApplicationSuspendedResponse(boolean suspended) { super(Response.Status.OK); object.setBool("suspended", suspended); } } private static class QuotaUsageResponse extends JSONResponse { QuotaUsageResponse(double usageRate) { super(Response.Status.OK); object.setDouble("rate", usageRate); } } static class ReindexingResponse extends JSONResponse { ReindexingResponse(ApplicationReindexing reindexing, Map<String, ClusterReindexing> clusters) { super(Response.Status.OK); object.setBool("enabled", reindexing.enabled()); setStatus(object.setObject("status"), reindexing.common()); Cursor clustersObject = object.setObject("clusters"); Stream<String> clusterNames = Stream.concat(clusters.keySet().stream(), reindexing.clusters().keySet().stream()); clusterNames.sorted() .forEach(clusterName -> { Cursor clusterObject = clustersObject.setObject(clusterName); Cursor pendingObject = clusterObject.setObject("pending"); Cursor readyObject = clusterObject.setObject("ready"); Map<String, Cursor> statuses = new HashMap<>(); if (reindexing.clusters().containsKey(clusterName)) { setStatus(clusterObject.setObject("status"), 
reindexing.clusters().get(clusterName).common()); reindexing.clusters().get(clusterName).pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> pendingObject.setLong(pending.getKey(), pending.getValue())); reindexing.clusters().get(clusterName).ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> setStatus(statuses.computeIfAbsent(ready.getKey(), readyObject::setObject), ready.getValue())); } if (clusters.containsKey(clusterName)) clusters.get(clusterName).documentTypeStatus().entrySet().stream().sorted(comparingByKey()) .forEach(status -> setStatus(statuses.computeIfAbsent(status.getKey(), readyObject::setObject), status.getValue())); }); } private static void setStatus(Cursor object, ApplicationReindexing.Status readyStatus) { object.setLong("readyMillis", readyStatus.ready().toEpochMilli()); } private static void setStatus(Cursor object, ClusterReindexing.Status status) { object.setLong("startedMillis", status.startedAt().toEpochMilli()); status.endedAt().ifPresent(endedAt -> object.setLong("endedMillis", endedAt.toEpochMilli())); status.state().map(ClusterReindexing.State::asString).ifPresent(state -> object.setString("state", state)); status.message().ifPresent(message -> object.setString("message", message)); status.progress().ifPresent(progress -> object.setDouble("progress", progress)); } } }
class ApplicationHandler extends HttpHandler { private static final List<UriPattern> URI_PATTERNS = Stream.of( "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: .map(UriPattern::new) .collect(toList()); private final Zone zone; private final ApplicationRepository applicationRepository; @Inject public ApplicationHandler(HttpHandler.Context ctx, Zone zone, ApplicationRepository applicationRepository) { super(ctx); this.zone = zone; this.applicationRepository = applicationRepository; } @Override @Override public HttpResponse handleGET(HttpRequest request) { ApplicationId applicationId = getApplicationIdFromRequest(request); Duration timeout = HttpHandler.getRequestTimeout(request, Duration.ofSeconds(5)); if (isServiceConvergeRequest(request)) { String hostAndPort = getHostNameFromRequest(request); return applicationRepository.checkServiceForConfigConvergence(applicationId, hostAndPort, request.getUri(), timeout, getVespaVersionFromRequest(request)); } if (isClusterControllerStatusRequest(request)) { String hostName = getHostNameFromRequest(request); String pathSuffix = getPathSuffix(request); return applicationRepository.clusterControllerStatusPage(applicationId, hostName, pathSuffix); } if (isReindexingRequest(request)) { return getReindexingStatus(applicationId); } if (isContentRequest(request)) { long sessionId = applicationRepository.getSessionIdForApplication(applicationId); String contentPath = getBindingMatch(request).group(7); ApplicationFile applicationFile = applicationRepository.getApplicationFileFromSession(applicationId.tenant(), sessionId, contentPath, ContentRequest.getApplicationFileMode(request.getMethod())); ApplicationContentRequest contentRequest = new ApplicationContentRequest(request, sessionId, applicationId, zone, contentPath, applicationFile); return new ContentHandler().get(contentRequest); } if (isServiceConvergeListRequest(request)) { return 
applicationRepository.servicesToCheckForConfigConvergence(applicationId, request.getUri(), timeout, getVespaVersionFromRequest(request)); } if (isFiledistributionStatusRequest(request)) { return applicationRepository.filedistributionStatus(applicationId, timeout); } if (isLogRequest(request)) { Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname")); String apiParams = Optional.ofNullable(request.getUri().getQuery()).map(q -> "?" + q).orElse(""); return applicationRepository.getLogs(applicationId, hostname, apiParams); } if (isProtonMetricsRequest(request)) { return applicationRepository.getProtonMetrics(applicationId); } if (isDeploymentMetricsRequest(request)) { return applicationRepository.getDeploymentMetrics(applicationId); } if (isIsSuspendedRequest(request)) { return new ApplicationSuspendedResponse(applicationRepository.isSuspended(applicationId)); } if (isTesterRequest(request)) { String testerCommand = getTesterCommandFromRequest(request); switch (testerCommand) { case "status": return applicationRepository.getTesterStatus(applicationId); case "log": Long after = Long.valueOf(request.getProperty("after")); return applicationRepository.getTesterLog(applicationId, after); case "ready": return applicationRepository.isTesterReady(applicationId); case "report": return applicationRepository.getTestReport(applicationId); default: throw new IllegalArgumentException("Unknown tester command in request " + request.getUri().toString()); } } if (isQuotaUsageRequest(request)) { var quotaUsageRate = applicationRepository.getQuotaUsageRate(applicationId); return new QuotaUsageResponse(quotaUsageRate); } return getApplicationResponse(applicationId); } GetApplicationResponse getApplicationResponse(ApplicationId applicationId) { return new GetApplicationResponse(Response.Status.OK, applicationRepository.getApplicationGeneration(applicationId), applicationRepository.getAllVersions(applicationId), 
applicationRepository.getApplicationPackageReference(applicationId)); } @Override public HttpResponse handlePOST(HttpRequest request) { ApplicationId applicationId = getApplicationIdFromRequest(request); if (isRestartRequest(request)) return restart(request, applicationId); if (isTesterStartTestsRequest(request)) { byte[] data; try { data = IOUtils.readBytes(request.getData(), 1024 * 1000); } catch (IOException e) { throw new IllegalArgumentException("Could not read data in request " + request); } return applicationRepository.startTests(applicationId, getSuiteFromRequest(request), data); } if (isReindexRequest(request)) { return triggerReindexing(request, applicationId); } if (isReindexingRequest(request)) { applicationRepository.modifyReindexing(applicationId, reindexing -> reindexing.enabled(true)); return createMessageResponse("Reindexing enabled"); } throw new NotFoundException("Illegal POST request '" + request.getUri() + "'"); } private HttpResponse triggerReindexing(HttpRequest request, ApplicationId applicationId) { Set<String> clusters = StringUtilities.split(request.getProperty("clusterId")); Set<String> types = StringUtilities.split(request.getProperty("documentType")); Instant now = applicationRepository.clock().instant(); applicationRepository.modifyReindexing(applicationId, reindexing -> { if (clusters.isEmpty()) reindexing = reindexing.withReady(now); else for (String cluster : clusters) if (types.isEmpty()) reindexing = reindexing.withReady(cluster, now); else for (String type : types) reindexing = reindexing.withReady(cluster, type, now); return reindexing; }); String message = "Reindexing " + (clusters.isEmpty() ? "" : (types.isEmpty() ? 
"" : "document types " + String.join(", ", types) + " in ") + "clusters " + String.join(", ", clusters) + " of ") + "application " + applicationId; return createMessageResponse(message); } private HttpResponse getReindexingStatus(ApplicationId applicationId) { Tenant tenant = applicationRepository.getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant().value() + "' not found"); return new ReindexingResponse(tenant.getApplicationRepo().database() .readReindexingStatus(applicationId) .orElseThrow(() -> new NotFoundException("Reindexing status not found for " + applicationId)), applicationRepository.getClusterReindexingStatus(applicationId)); } private HttpResponse restart(HttpRequest request, ApplicationId applicationId) { if (getBindingMatch(request).groupCount() != 7) throw new NotFoundException("Illegal POST restart request '" + request.getUri() + "': Must have 6 arguments but had " + (getBindingMatch(request).groupCount() - 1)); applicationRepository.restart(applicationId, hostFilterFrom(request)); return new JSONResponse(Response.Status.OK); } private HostFilter hostFilterFrom(HttpRequest request) { return HostFilter.from(request.getProperty("hostname"), request.getProperty("flavor"), request.getProperty("clusterType"), request.getProperty("clusterId")); } private static BindingMatch<?> getBindingMatch(HttpRequest request) { return URI_PATTERNS.stream() .map(pattern -> { UriPattern.Match match = pattern.match(request.getUri()); if (match == null) return null; return new BindingMatch<>(match, new Object(), pattern); }) .filter(Objects::nonNull) .findFirst() .orElseThrow(() -> new IllegalArgumentException("Illegal url for config request: " + request.getUri())); } private static boolean isRestartRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/restart"); } private static boolean isReindexRequest(HttpRequest request) { return 
getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/reindex"); } private static boolean isReindexingRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/reindexing"); } private static boolean isIsSuspendedRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/suspended"); } private static boolean isProtonMetricsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().endsWith("/metrics/proton"); } private static boolean isDeploymentMetricsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().endsWith("/metrics/deployment"); } private static boolean isLogRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/logs"); } private static boolean isServiceConvergeListRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/serviceconverge"); } private static boolean isServiceConvergeRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().contains("/serviceconverge/"); } private static boolean isClusterControllerStatusRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 9 && request.getUri().getPath().contains("/clustercontroller/"); } private static boolean isContentRequest(HttpRequest request) { return getBindingMatch(request).groupCount() > 7 && request.getUri().getPath().contains("/content/"); } private static boolean isFiledistributionStatusRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().contains("/filedistributionstatus"); } private static boolean isTesterRequest(HttpRequest request) { return 
getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().contains("/tester"); } private static boolean isTesterStartTestsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 9 && request.getUri().getPath().contains("/tester/run/"); } private static boolean isQuotaUsageRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/quota"); } private static String getHostNameFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(7); } private static String getTesterCommandFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(7); } private static String getSuiteFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(8); } private static String getPathSuffix(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(8); } private static ApplicationId getApplicationIdFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); if (bm.groupCount() > 4) return createFromRequestFullAppId(bm); return createFromRequestSimpleAppId(bm); } private static ApplicationId createFromRequestSimpleAppId(BindingMatch<?> bm) { TenantName tenant = TenantName.from(bm.group(2)); ApplicationName application = ApplicationName.from(bm.group(3)); return new ApplicationId.Builder().tenant(tenant).applicationName(application).build(); } private static ApplicationId createFromRequestFullAppId(BindingMatch<?> bm) { String tenant = bm.group(2); String application = bm.group(3); String instance = bm.group(6); return new ApplicationId.Builder() .tenant(tenant) .applicationName(application).instanceName(instance) .build(); } private static Optional<Version> getVespaVersionFromRequest(HttpRequest request) { String vespaVersion = request.getProperty("vespaVersion"); return (vespaVersion == null || vespaVersion.isEmpty()) ? 
Optional.empty() : Optional.of(Version.fromString(vespaVersion)); } private static class DeleteApplicationResponse extends JSONResponse { DeleteApplicationResponse(int status, ApplicationId applicationId) { super(status); object.setString("message", "Application '" + applicationId + "' deleted"); } } private static class GetApplicationResponse extends JSONResponse { GetApplicationResponse(int status, long generation, List<Version> modelVersions, Optional<String> applicationPackageReference) { super(status); object.setLong("generation", generation); object.setString("applicationPackageFileReference", applicationPackageReference.orElse("")); Cursor modelVersionArray = object.setArray("modelVersions"); modelVersions.forEach(version -> modelVersionArray.addString(version.toFullString())); } } private static class ApplicationSuspendedResponse extends JSONResponse { ApplicationSuspendedResponse(boolean suspended) { super(Response.Status.OK); object.setBool("suspended", suspended); } } private static class QuotaUsageResponse extends JSONResponse { QuotaUsageResponse(double usageRate) { super(Response.Status.OK); object.setDouble("rate", usageRate); } } static class ReindexingResponse extends JSONResponse { ReindexingResponse(ApplicationReindexing reindexing, Map<String, ClusterReindexing> clusters) { super(Response.Status.OK); object.setBool("enabled", reindexing.enabled()); setStatus(object.setObject("status"), reindexing.common()); Cursor clustersObject = object.setObject("clusters"); Stream<String> clusterNames = Stream.concat(clusters.keySet().stream(), reindexing.clusters().keySet().stream()); clusterNames.sorted() .forEach(clusterName -> { Cursor clusterObject = clustersObject.setObject(clusterName); Cursor pendingObject = clusterObject.setObject("pending"); Cursor readyObject = clusterObject.setObject("ready"); Map<String, Cursor> statuses = new HashMap<>(); if (reindexing.clusters().containsKey(clusterName)) { setStatus(clusterObject.setObject("status"), 
reindexing.clusters().get(clusterName).common()); reindexing.clusters().get(clusterName).pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> pendingObject.setLong(pending.getKey(), pending.getValue())); reindexing.clusters().get(clusterName).ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> setStatus(statuses.computeIfAbsent(ready.getKey(), readyObject::setObject), ready.getValue())); } if (clusters.containsKey(clusterName)) clusters.get(clusterName).documentTypeStatus().entrySet().stream().sorted(comparingByKey()) .forEach(status -> setStatus(statuses.computeIfAbsent(status.getKey(), readyObject::setObject), status.getValue())); }); } private static void setStatus(Cursor object, ApplicationReindexing.Status readyStatus) { object.setLong("readyMillis", readyStatus.ready().toEpochMilli()); } private static void setStatus(Cursor object, ClusterReindexing.Status status) { object.setLong("startedMillis", status.startedAt().toEpochMilli()); status.endedAt().ifPresent(endedAt -> object.setLong("endedMillis", endedAt.toEpochMilli())); status.state().map(ClusterReindexing.State::asString).ifPresent(state -> object.setString("state", state)); status.message().ifPresent(message -> object.setString("message", message)); status.progress().ifPresent(progress -> object.setDouble("progress", progress)); } } private static JSONResponse createMessageResponse(String message) { return new JSONResponse(Response.Status.OK) { { object.setString("message", message); } }; } }
You're right.
/**
 * DELETE entry point: disables reindexing when the reindexing resource is
 * addressed, otherwise removes the application (404 when it is unknown).
 */
public HttpResponse handleDELETE(HttpRequest request) {
    ApplicationId applicationId = getApplicationIdFromRequest(request);

    if (isReindexingRequest(request)) {
        applicationRepository.modifyReindexing(applicationId, reindexing -> reindexing.enabled(false));
        // Instance-initializer ("double-brace") subclass of JSONResponse carrying the status message.
        return new JSONResponse(Response.Status.OK) { { object.setString("message", "Reindexing disabled"); } };
    }

    boolean deleted = applicationRepository.delete(applicationId);
    if (deleted)
        return new DeleteApplicationResponse(Response.Status.OK, applicationId);
    return HttpErrorResponse.notFoundError("Unable to delete " + applicationId.toFullString() + ": Not found");
}
return new JSONResponse(Response.Status.OK) { { object.setString("message", "Reindexing disabled"); } };
/**
 * DELETE entry point: a reindexing-resource request turns reindexing off and
 * answers with a message response; any other request deletes the application,
 * returning 404 when no such application exists.
 */
public HttpResponse handleDELETE(HttpRequest request) {
    ApplicationId applicationId = getApplicationIdFromRequest(request);

    if (isReindexingRequest(request)) {
        applicationRepository.modifyReindexing(applicationId, reindexing -> reindexing.enabled(false));
        return createMessageResponse("Reindexing disabled");
    }

    return applicationRepository.delete(applicationId)
           ? new DeleteApplicationResponse(Response.Status.OK, applicationId)
           : HttpErrorResponse.notFoundError("Unable to delete " + applicationId.toFullString() + ": Not found");
}
class ApplicationHandler extends HttpHandler { private static final List<UriPattern> URI_PATTERNS = Stream.of( "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: .map(UriPattern::new) .collect(toList()); private final Zone zone; private final ApplicationRepository applicationRepository; @Inject public ApplicationHandler(HttpHandler.Context ctx, Zone zone, ApplicationRepository applicationRepository) { super(ctx); this.zone = zone; this.applicationRepository = applicationRepository; } @Override @Override public HttpResponse handleGET(HttpRequest request) { ApplicationId applicationId = getApplicationIdFromRequest(request); Duration timeout = HttpHandler.getRequestTimeout(request, Duration.ofSeconds(5)); if (isServiceConvergeRequest(request)) { String hostAndPort = getHostNameFromRequest(request); return applicationRepository.checkServiceForConfigConvergence(applicationId, hostAndPort, request.getUri(), timeout, getVespaVersionFromRequest(request)); } if (isClusterControllerStatusRequest(request)) { String hostName = getHostNameFromRequest(request); String pathSuffix = getPathSuffix(request); return applicationRepository.clusterControllerStatusPage(applicationId, hostName, pathSuffix); } if (isReindexingRequest(request)) { return getReindexingStatus(applicationId); } if (isContentRequest(request)) { long sessionId = applicationRepository.getSessionIdForApplication(applicationId); String contentPath = getBindingMatch(request).group(7); ApplicationFile applicationFile = applicationRepository.getApplicationFileFromSession(applicationId.tenant(), sessionId, contentPath, ContentRequest.getApplicationFileMode(request.getMethod())); ApplicationContentRequest contentRequest = new ApplicationContentRequest(request, sessionId, applicationId, zone, contentPath, applicationFile); return new ContentHandler().get(contentRequest); } if (isServiceConvergeListRequest(request)) { return 
applicationRepository.servicesToCheckForConfigConvergence(applicationId, request.getUri(), timeout, getVespaVersionFromRequest(request)); } if (isFiledistributionStatusRequest(request)) { return applicationRepository.filedistributionStatus(applicationId, timeout); } if (isLogRequest(request)) { Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname")); String apiParams = Optional.ofNullable(request.getUri().getQuery()).map(q -> "?" + q).orElse(""); return applicationRepository.getLogs(applicationId, hostname, apiParams); } if (isProtonMetricsRequest(request)) { return applicationRepository.getProtonMetrics(applicationId); } if (isDeploymentMetricsRequest(request)) { return applicationRepository.getDeploymentMetrics(applicationId); } if (isIsSuspendedRequest(request)) { return new ApplicationSuspendedResponse(applicationRepository.isSuspended(applicationId)); } if (isTesterRequest(request)) { String testerCommand = getTesterCommandFromRequest(request); switch (testerCommand) { case "status": return applicationRepository.getTesterStatus(applicationId); case "log": Long after = Long.valueOf(request.getProperty("after")); return applicationRepository.getTesterLog(applicationId, after); case "ready": return applicationRepository.isTesterReady(applicationId); case "report": return applicationRepository.getTestReport(applicationId); default: throw new IllegalArgumentException("Unknown tester command in request " + request.getUri().toString()); } } if (isQuotaUsageRequest(request)) { var quotaUsageRate = applicationRepository.getQuotaUsageRate(applicationId); return new QuotaUsageResponse(quotaUsageRate); } return getApplicationResponse(applicationId); } GetApplicationResponse getApplicationResponse(ApplicationId applicationId) { return new GetApplicationResponse(Response.Status.OK, applicationRepository.getApplicationGeneration(applicationId), applicationRepository.getAllVersions(applicationId), 
applicationRepository.getApplicationPackageReference(applicationId)); } @Override public HttpResponse handlePOST(HttpRequest request) { ApplicationId applicationId = getApplicationIdFromRequest(request); if (isRestartRequest(request)) return restart(request, applicationId); if (isTesterStartTestsRequest(request)) { byte[] data; try { data = IOUtils.readBytes(request.getData(), 1024 * 1000); } catch (IOException e) { throw new IllegalArgumentException("Could not read data in request " + request); } return applicationRepository.startTests(applicationId, getSuiteFromRequest(request), data); } if (isReindexRequest(request)) { return triggerReindexing(request, applicationId); } if (isReindexingRequest(request)) { applicationRepository.modifyReindexing(applicationId, reindexing -> reindexing.enabled(true)); return new JSONResponse(Response.Status.OK) { { object.setString("message", "Reindexing enabled"); } }; } throw new NotFoundException("Illegal POST request '" + request.getUri() + "'"); } private HttpResponse triggerReindexing(HttpRequest request, ApplicationId applicationId) { Set<String> clusters = StringUtilities.split(request.getProperty("clusterId")); Set<String> types = StringUtilities.split(request.getProperty("documentType")); Instant now = applicationRepository.clock().instant(); applicationRepository.modifyReindexing(applicationId, reindexing -> { if (clusters.isEmpty()) reindexing = reindexing.withReady(now); else for (String cluster : clusters) if (types.isEmpty()) reindexing = reindexing.withReady(cluster, now); else for (String type : types) reindexing = reindexing.withReady(cluster, type, now); return reindexing; }); String message = "Reindexing " + (clusters.isEmpty() ? "" : (types.isEmpty() ? 
"" : "document types " + String.join(", ", types) + " in ") + "clusters " + String.join(", ", clusters) + " of ") + "application " + applicationId; return new JSONResponse(Response.Status.OK) { { object.setString("message", message); } }; } private HttpResponse getReindexingStatus(ApplicationId applicationId) { Tenant tenant = applicationRepository.getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant().value() + "' not found"); return new ReindexingResponse(tenant.getApplicationRepo().database() .readReindexingStatus(applicationId) .orElseThrow(() -> new NotFoundException("Reindexing status not found for " + applicationId)), applicationRepository.getClusterReindexingStatus(applicationId)); } private HttpResponse restart(HttpRequest request, ApplicationId applicationId) { if (getBindingMatch(request).groupCount() != 7) throw new NotFoundException("Illegal POST restart request '" + request.getUri() + "': Must have 6 arguments but had " + (getBindingMatch(request).groupCount() - 1)); applicationRepository.restart(applicationId, hostFilterFrom(request)); return new JSONResponse(Response.Status.OK); } private HostFilter hostFilterFrom(HttpRequest request) { return HostFilter.from(request.getProperty("hostname"), request.getProperty("flavor"), request.getProperty("clusterType"), request.getProperty("clusterId")); } private static BindingMatch<?> getBindingMatch(HttpRequest request) { return URI_PATTERNS.stream() .map(pattern -> { UriPattern.Match match = pattern.match(request.getUri()); if (match == null) return null; return new BindingMatch<>(match, new Object(), pattern); }) .filter(Objects::nonNull) .findFirst() .orElseThrow(() -> new IllegalArgumentException("Illegal url for config request: " + request.getUri())); } private static boolean isRestartRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/restart"); } private static boolean 
isReindexRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/reindex"); } private static boolean isReindexingRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/reindexing"); } private static boolean isIsSuspendedRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/suspended"); } private static boolean isProtonMetricsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().endsWith("/metrics/proton"); } private static boolean isDeploymentMetricsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().endsWith("/metrics/deployment"); } private static boolean isLogRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/logs"); } private static boolean isServiceConvergeListRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/serviceconverge"); } private static boolean isServiceConvergeRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().contains("/serviceconverge/"); } private static boolean isClusterControllerStatusRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 9 && request.getUri().getPath().contains("/clustercontroller/"); } private static boolean isContentRequest(HttpRequest request) { return getBindingMatch(request).groupCount() > 7 && request.getUri().getPath().contains("/content/"); } private static boolean isFiledistributionStatusRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().contains("/filedistributionstatus"); } private static boolean isTesterRequest(HttpRequest 
request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().contains("/tester"); } private static boolean isTesterStartTestsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 9 && request.getUri().getPath().contains("/tester/run/"); } private static boolean isQuotaUsageRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/quota"); } private static String getHostNameFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(7); } private static String getTesterCommandFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(7); } private static String getSuiteFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(8); } private static String getPathSuffix(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(8); } private static ApplicationId getApplicationIdFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); if (bm.groupCount() > 4) return createFromRequestFullAppId(bm); return createFromRequestSimpleAppId(bm); } private static ApplicationId createFromRequestSimpleAppId(BindingMatch<?> bm) { TenantName tenant = TenantName.from(bm.group(2)); ApplicationName application = ApplicationName.from(bm.group(3)); return new ApplicationId.Builder().tenant(tenant).applicationName(application).build(); } private static ApplicationId createFromRequestFullAppId(BindingMatch<?> bm) { String tenant = bm.group(2); String application = bm.group(3); String instance = bm.group(6); return new ApplicationId.Builder() .tenant(tenant) .applicationName(application).instanceName(instance) .build(); } private static Optional<Version> getVespaVersionFromRequest(HttpRequest request) { String vespaVersion = request.getProperty("vespaVersion"); return (vespaVersion == null || vespaVersion.isEmpty()) ? 
Optional.empty() : Optional.of(Version.fromString(vespaVersion)); } private static class DeleteApplicationResponse extends JSONResponse { DeleteApplicationResponse(int status, ApplicationId applicationId) { super(status); object.setString("message", "Application '" + applicationId + "' deleted"); } } private static class GetApplicationResponse extends JSONResponse { GetApplicationResponse(int status, long generation, List<Version> modelVersions, Optional<String> applicationPackageReference) { super(status); object.setLong("generation", generation); object.setString("applicationPackageFileReference", applicationPackageReference.orElse("")); Cursor modelVersionArray = object.setArray("modelVersions"); modelVersions.forEach(version -> modelVersionArray.addString(version.toFullString())); } } private static class ApplicationSuspendedResponse extends JSONResponse { ApplicationSuspendedResponse(boolean suspended) { super(Response.Status.OK); object.setBool("suspended", suspended); } } private static class QuotaUsageResponse extends JSONResponse { QuotaUsageResponse(double usageRate) { super(Response.Status.OK); object.setDouble("rate", usageRate); } } static class ReindexingResponse extends JSONResponse { ReindexingResponse(ApplicationReindexing reindexing, Map<String, ClusterReindexing> clusters) { super(Response.Status.OK); object.setBool("enabled", reindexing.enabled()); setStatus(object.setObject("status"), reindexing.common()); Cursor clustersObject = object.setObject("clusters"); Stream<String> clusterNames = Stream.concat(clusters.keySet().stream(), reindexing.clusters().keySet().stream()); clusterNames.sorted() .forEach(clusterName -> { Cursor clusterObject = clustersObject.setObject(clusterName); Cursor pendingObject = clusterObject.setObject("pending"); Cursor readyObject = clusterObject.setObject("ready"); Map<String, Cursor> statuses = new HashMap<>(); if (reindexing.clusters().containsKey(clusterName)) { setStatus(clusterObject.setObject("status"), 
reindexing.clusters().get(clusterName).common()); reindexing.clusters().get(clusterName).pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> pendingObject.setLong(pending.getKey(), pending.getValue())); reindexing.clusters().get(clusterName).ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> setStatus(statuses.computeIfAbsent(ready.getKey(), readyObject::setObject), ready.getValue())); } if (clusters.containsKey(clusterName)) clusters.get(clusterName).documentTypeStatus().entrySet().stream().sorted(comparingByKey()) .forEach(status -> setStatus(statuses.computeIfAbsent(status.getKey(), readyObject::setObject), status.getValue())); }); } private static void setStatus(Cursor object, ApplicationReindexing.Status readyStatus) { object.setLong("readyMillis", readyStatus.ready().toEpochMilli()); } private static void setStatus(Cursor object, ClusterReindexing.Status status) { object.setLong("startedMillis", status.startedAt().toEpochMilli()); status.endedAt().ifPresent(endedAt -> object.setLong("endedMillis", endedAt.toEpochMilli())); status.state().map(ClusterReindexing.State::asString).ifPresent(state -> object.setString("state", state)); status.message().ifPresent(message -> object.setString("message", message)); status.progress().ifPresent(progress -> object.setDouble("progress", progress)); } } }
class ApplicationHandler extends HttpHandler { private static final List<UriPattern> URI_PATTERNS = Stream.of( "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: "http: .map(UriPattern::new) .collect(toList()); private final Zone zone; private final ApplicationRepository applicationRepository; @Inject public ApplicationHandler(HttpHandler.Context ctx, Zone zone, ApplicationRepository applicationRepository) { super(ctx); this.zone = zone; this.applicationRepository = applicationRepository; } @Override @Override public HttpResponse handleGET(HttpRequest request) { ApplicationId applicationId = getApplicationIdFromRequest(request); Duration timeout = HttpHandler.getRequestTimeout(request, Duration.ofSeconds(5)); if (isServiceConvergeRequest(request)) { String hostAndPort = getHostNameFromRequest(request); return applicationRepository.checkServiceForConfigConvergence(applicationId, hostAndPort, request.getUri(), timeout, getVespaVersionFromRequest(request)); } if (isClusterControllerStatusRequest(request)) { String hostName = getHostNameFromRequest(request); String pathSuffix = getPathSuffix(request); return applicationRepository.clusterControllerStatusPage(applicationId, hostName, pathSuffix); } if (isReindexingRequest(request)) { return getReindexingStatus(applicationId); } if (isContentRequest(request)) { long sessionId = applicationRepository.getSessionIdForApplication(applicationId); String contentPath = getBindingMatch(request).group(7); ApplicationFile applicationFile = applicationRepository.getApplicationFileFromSession(applicationId.tenant(), sessionId, contentPath, ContentRequest.getApplicationFileMode(request.getMethod())); ApplicationContentRequest contentRequest = new ApplicationContentRequest(request, sessionId, applicationId, zone, contentPath, applicationFile); return new ContentHandler().get(contentRequest); } if (isServiceConvergeListRequest(request)) { return 
applicationRepository.servicesToCheckForConfigConvergence(applicationId, request.getUri(), timeout, getVespaVersionFromRequest(request)); } if (isFiledistributionStatusRequest(request)) { return applicationRepository.filedistributionStatus(applicationId, timeout); } if (isLogRequest(request)) { Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname")); String apiParams = Optional.ofNullable(request.getUri().getQuery()).map(q -> "?" + q).orElse(""); return applicationRepository.getLogs(applicationId, hostname, apiParams); } if (isProtonMetricsRequest(request)) { return applicationRepository.getProtonMetrics(applicationId); } if (isDeploymentMetricsRequest(request)) { return applicationRepository.getDeploymentMetrics(applicationId); } if (isIsSuspendedRequest(request)) { return new ApplicationSuspendedResponse(applicationRepository.isSuspended(applicationId)); } if (isTesterRequest(request)) { String testerCommand = getTesterCommandFromRequest(request); switch (testerCommand) { case "status": return applicationRepository.getTesterStatus(applicationId); case "log": Long after = Long.valueOf(request.getProperty("after")); return applicationRepository.getTesterLog(applicationId, after); case "ready": return applicationRepository.isTesterReady(applicationId); case "report": return applicationRepository.getTestReport(applicationId); default: throw new IllegalArgumentException("Unknown tester command in request " + request.getUri().toString()); } } if (isQuotaUsageRequest(request)) { var quotaUsageRate = applicationRepository.getQuotaUsageRate(applicationId); return new QuotaUsageResponse(quotaUsageRate); } return getApplicationResponse(applicationId); } GetApplicationResponse getApplicationResponse(ApplicationId applicationId) { return new GetApplicationResponse(Response.Status.OK, applicationRepository.getApplicationGeneration(applicationId), applicationRepository.getAllVersions(applicationId), 
applicationRepository.getApplicationPackageReference(applicationId)); } @Override public HttpResponse handlePOST(HttpRequest request) { ApplicationId applicationId = getApplicationIdFromRequest(request); if (isRestartRequest(request)) return restart(request, applicationId); if (isTesterStartTestsRequest(request)) { byte[] data; try { data = IOUtils.readBytes(request.getData(), 1024 * 1000); } catch (IOException e) { throw new IllegalArgumentException("Could not read data in request " + request); } return applicationRepository.startTests(applicationId, getSuiteFromRequest(request), data); } if (isReindexRequest(request)) { return triggerReindexing(request, applicationId); } if (isReindexingRequest(request)) { applicationRepository.modifyReindexing(applicationId, reindexing -> reindexing.enabled(true)); return createMessageResponse("Reindexing enabled"); } throw new NotFoundException("Illegal POST request '" + request.getUri() + "'"); } private HttpResponse triggerReindexing(HttpRequest request, ApplicationId applicationId) { Set<String> clusters = StringUtilities.split(request.getProperty("clusterId")); Set<String> types = StringUtilities.split(request.getProperty("documentType")); Instant now = applicationRepository.clock().instant(); applicationRepository.modifyReindexing(applicationId, reindexing -> { if (clusters.isEmpty()) reindexing = reindexing.withReady(now); else for (String cluster : clusters) if (types.isEmpty()) reindexing = reindexing.withReady(cluster, now); else for (String type : types) reindexing = reindexing.withReady(cluster, type, now); return reindexing; }); String message = "Reindexing " + (clusters.isEmpty() ? "" : (types.isEmpty() ? 
"" : "document types " + String.join(", ", types) + " in ") + "clusters " + String.join(", ", clusters) + " of ") + "application " + applicationId; return createMessageResponse(message); } private HttpResponse getReindexingStatus(ApplicationId applicationId) { Tenant tenant = applicationRepository.getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant().value() + "' not found"); return new ReindexingResponse(tenant.getApplicationRepo().database() .readReindexingStatus(applicationId) .orElseThrow(() -> new NotFoundException("Reindexing status not found for " + applicationId)), applicationRepository.getClusterReindexingStatus(applicationId)); } private HttpResponse restart(HttpRequest request, ApplicationId applicationId) { if (getBindingMatch(request).groupCount() != 7) throw new NotFoundException("Illegal POST restart request '" + request.getUri() + "': Must have 6 arguments but had " + (getBindingMatch(request).groupCount() - 1)); applicationRepository.restart(applicationId, hostFilterFrom(request)); return new JSONResponse(Response.Status.OK); } private HostFilter hostFilterFrom(HttpRequest request) { return HostFilter.from(request.getProperty("hostname"), request.getProperty("flavor"), request.getProperty("clusterType"), request.getProperty("clusterId")); } private static BindingMatch<?> getBindingMatch(HttpRequest request) { return URI_PATTERNS.stream() .map(pattern -> { UriPattern.Match match = pattern.match(request.getUri()); if (match == null) return null; return new BindingMatch<>(match, new Object(), pattern); }) .filter(Objects::nonNull) .findFirst() .orElseThrow(() -> new IllegalArgumentException("Illegal url for config request: " + request.getUri())); } private static boolean isRestartRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/restart"); } private static boolean isReindexRequest(HttpRequest request) { return 
getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/reindex"); } private static boolean isReindexingRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/reindexing"); } private static boolean isIsSuspendedRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/suspended"); } private static boolean isProtonMetricsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().endsWith("/metrics/proton"); } private static boolean isDeploymentMetricsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().endsWith("/metrics/deployment"); } private static boolean isLogRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/logs"); } private static boolean isServiceConvergeListRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/serviceconverge"); } private static boolean isServiceConvergeRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().contains("/serviceconverge/"); } private static boolean isClusterControllerStatusRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 9 && request.getUri().getPath().contains("/clustercontroller/"); } private static boolean isContentRequest(HttpRequest request) { return getBindingMatch(request).groupCount() > 7 && request.getUri().getPath().contains("/content/"); } private static boolean isFiledistributionStatusRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().contains("/filedistributionstatus"); } private static boolean isTesterRequest(HttpRequest request) { return 
getBindingMatch(request).groupCount() == 8 && request.getUri().getPath().contains("/tester"); } private static boolean isTesterStartTestsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 9 && request.getUri().getPath().contains("/tester/run/"); } private static boolean isQuotaUsageRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 7 && request.getUri().getPath().endsWith("/quota"); } private static String getHostNameFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(7); } private static String getTesterCommandFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(7); } private static String getSuiteFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(8); } private static String getPathSuffix(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); return bm.group(8); } private static ApplicationId getApplicationIdFromRequest(HttpRequest req) { BindingMatch<?> bm = getBindingMatch(req); if (bm.groupCount() > 4) return createFromRequestFullAppId(bm); return createFromRequestSimpleAppId(bm); } private static ApplicationId createFromRequestSimpleAppId(BindingMatch<?> bm) { TenantName tenant = TenantName.from(bm.group(2)); ApplicationName application = ApplicationName.from(bm.group(3)); return new ApplicationId.Builder().tenant(tenant).applicationName(application).build(); } private static ApplicationId createFromRequestFullAppId(BindingMatch<?> bm) { String tenant = bm.group(2); String application = bm.group(3); String instance = bm.group(6); return new ApplicationId.Builder() .tenant(tenant) .applicationName(application).instanceName(instance) .build(); } private static Optional<Version> getVespaVersionFromRequest(HttpRequest request) { String vespaVersion = request.getProperty("vespaVersion"); return (vespaVersion == null || vespaVersion.isEmpty()) ? 
Optional.empty() : Optional.of(Version.fromString(vespaVersion)); } private static class DeleteApplicationResponse extends JSONResponse { DeleteApplicationResponse(int status, ApplicationId applicationId) { super(status); object.setString("message", "Application '" + applicationId + "' deleted"); } } private static class GetApplicationResponse extends JSONResponse { GetApplicationResponse(int status, long generation, List<Version> modelVersions, Optional<String> applicationPackageReference) { super(status); object.setLong("generation", generation); object.setString("applicationPackageFileReference", applicationPackageReference.orElse("")); Cursor modelVersionArray = object.setArray("modelVersions"); modelVersions.forEach(version -> modelVersionArray.addString(version.toFullString())); } } private static class ApplicationSuspendedResponse extends JSONResponse { ApplicationSuspendedResponse(boolean suspended) { super(Response.Status.OK); object.setBool("suspended", suspended); } } private static class QuotaUsageResponse extends JSONResponse { QuotaUsageResponse(double usageRate) { super(Response.Status.OK); object.setDouble("rate", usageRate); } } static class ReindexingResponse extends JSONResponse { ReindexingResponse(ApplicationReindexing reindexing, Map<String, ClusterReindexing> clusters) { super(Response.Status.OK); object.setBool("enabled", reindexing.enabled()); setStatus(object.setObject("status"), reindexing.common()); Cursor clustersObject = object.setObject("clusters"); Stream<String> clusterNames = Stream.concat(clusters.keySet().stream(), reindexing.clusters().keySet().stream()); clusterNames.sorted() .forEach(clusterName -> { Cursor clusterObject = clustersObject.setObject(clusterName); Cursor pendingObject = clusterObject.setObject("pending"); Cursor readyObject = clusterObject.setObject("ready"); Map<String, Cursor> statuses = new HashMap<>(); if (reindexing.clusters().containsKey(clusterName)) { setStatus(clusterObject.setObject("status"), 
reindexing.clusters().get(clusterName).common()); reindexing.clusters().get(clusterName).pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> pendingObject.setLong(pending.getKey(), pending.getValue())); reindexing.clusters().get(clusterName).ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> setStatus(statuses.computeIfAbsent(ready.getKey(), readyObject::setObject), ready.getValue())); } if (clusters.containsKey(clusterName)) clusters.get(clusterName).documentTypeStatus().entrySet().stream().sorted(comparingByKey()) .forEach(status -> setStatus(statuses.computeIfAbsent(status.getKey(), readyObject::setObject), status.getValue())); }); } private static void setStatus(Cursor object, ApplicationReindexing.Status readyStatus) { object.setLong("readyMillis", readyStatus.ready().toEpochMilli()); } private static void setStatus(Cursor object, ClusterReindexing.Status status) { object.setLong("startedMillis", status.startedAt().toEpochMilli()); status.endedAt().ifPresent(endedAt -> object.setLong("endedMillis", endedAt.toEpochMilli())); status.state().map(ClusterReindexing.State::asString).ifPresent(state -> object.setString("state", state)); status.message().ifPresent(message -> object.setString("message", message)); status.progress().ifPresent(progress -> object.setDouble("progress", progress)); } } private static JSONResponse createMessageResponse(String message) { return new JSONResponse(Response.Status.OK) { { object.setString("message", message); } }; } }
I think this should have worked as follows: 1. quorumPeer.running should be set to false (under a synchronized(quorumPeer)), which means the quorumPeer thread will eventually exit its thread 2. wait until quorumPeer thread exits with quorumPeer.join(), 3. then do the rest of shutdown() tearing down resources. As it is now (3) happens before (2), tearing down resources that might be used concurrently by the quorum peer thread. And (2) may happen a long time after we return from this method, meaning it may still run concurrently with trying to start another instance of ZK.
public void shutdown() { if (quorumPeer != null) { try { quorumPeer.shutdown(); } catch (RuntimeException e) { LOG.log(Level.SEVERE, "Failed to shut down properly, forcing restart", e); System.exit(1); } } }
quorumPeer.shutdown();
public void shutdown() { if (quorumPeer != null) { try { quorumPeer.shutdown(); } catch (RuntimeException e) { LOG.log(Level.SEVERE, "Failed to shut down properly, forcing restart", e); System.exit(1); } } }
class VespaQuorumPeer extends QuorumPeerMain { private static final Logger LOG = Logger.getLogger(VespaQuorumPeer.class.getName()); public void start(Path path) { initializeAndRun(new String[]{ path.toFile().getAbsolutePath()}); } @Override protected void initializeAndRun(String[] args) { try { super.initializeAndRun(args); } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) { throw new RuntimeException("Exception when initializing or running ZooKeeper server", e); } } }
class VespaQuorumPeer extends QuorumPeerMain { private static final Logger LOG = Logger.getLogger(VespaQuorumPeer.class.getName()); public void start(Path path) { initializeAndRun(new String[]{ path.toFile().getAbsolutePath()}); } @Override protected void initializeAndRun(String[] args) { try { super.initializeAndRun(args); } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) { throw new RuntimeException("Exception when initializing or running ZooKeeper server", e); } } }
Even better ```java if (privateKeyFile == null && certificateFile == null) return Optional.empty(); if (privateKeyFile == null || certificateFile == null) throw ... return Optional.of(...) ```
Optional<CertificateAndKey> certificateAndKey() throws CliArgumentsException { Path certificateFile = fileValue(CERTIFICATE_OPTION).orElse(null); Path privateKeyFile = fileValue(PRIVATE_KEY_OPTION).orElse(null); if ((certificateFile != null && privateKeyFile == null) || (certificateFile == null && privateKeyFile != null)) { throw new CliArgumentsException(String.format("Both '%s' and '%s' must be specified together", CERTIFICATE_OPTION, PRIVATE_KEY_OPTION)); } if (privateKeyFile == null && certificateFile == null) return Optional.empty(); return Optional.of(new CertificateAndKey(certificateFile, privateKeyFile)); }
if ((certificateFile != null && privateKeyFile == null) || (certificateFile == null && privateKeyFile != null)) {
Optional<CertificateAndKey> certificateAndKey() throws CliArgumentsException { Path certificateFile = fileValue(CERTIFICATE_OPTION).orElse(null); Path privateKeyFile = fileValue(PRIVATE_KEY_OPTION).orElse(null); if ((certificateFile == null) != (privateKeyFile == null)) { throw new CliArgumentsException(String.format("Both '%s' and '%s' must be specified together", CERTIFICATE_OPTION, PRIVATE_KEY_OPTION)); } if (privateKeyFile == null && certificateFile == null) return Optional.empty(); return Optional.of(new CertificateAndKey(certificateFile, privateKeyFile)); }
class CliArguments { private static final Options optionsDefinition = createOptions(); private static final String HELP_OPTION = "help"; private static final String VERSION_OPTION = "version"; private static final String ENDPOINT_OPTION = "endpoint"; private static final String FILE_OPTION = "file"; private static final String CONNECTIONS_OPTION = "connections"; private static final String MAX_STREAMS_PER_CONNECTION = "max-streams-per-connection"; private static final String CERTIFICATE_OPTION = "certificate"; private static final String PRIVATE_KEY_OPTION = "private-key"; private static final String CA_CERTIFICATES_OPTION = "ca-certificates"; private static final String DISABLE_SSL_HOSTNAME_VERIFICATION_OPTION = "disable-ssl-hostname-verification"; private final CommandLine arguments; private CliArguments(CommandLine arguments) { this.arguments = arguments; } static CliArguments fromRawArgs(String[] rawArgs) throws CliArgumentsException { CommandLineParser parser = new DefaultParser(); try { return new CliArguments(parser.parse(optionsDefinition, rawArgs)); } catch (ParseException e) { throw new CliArgumentsException(e); } } URI endpoint() throws CliArgumentsException { try { URL url = (URL) arguments.getParsedOptionValue(ENDPOINT_OPTION); if (url == null) throw new CliArgumentsException("Endpoint must be specified"); return url.toURI(); } catch (ParseException | URISyntaxException e) { throw new CliArgumentsException("Invalid endpoint: " + e.getMessage(), e); } } boolean helpSpecified() { return has(HELP_OPTION); } boolean versionSpecified() { return has(VERSION_OPTION); } OptionalInt connections() throws CliArgumentsException { return intValue(CONNECTIONS_OPTION); } OptionalInt maxStreamsPerConnection() throws CliArgumentsException { return intValue(MAX_STREAMS_PER_CONNECTION); } Optional<Path> caCertificates() throws CliArgumentsException { return fileValue(CA_CERTIFICATES_OPTION); } Path inputFile() throws CliArgumentsException { return fileValue(FILE_OPTION) 
.orElseThrow(() -> new CliArgumentsException("Feed file must be specified")); } boolean sslHostnameVerificationDisabled() { return has(DISABLE_SSL_HOSTNAME_VERIFICATION_OPTION); } private OptionalInt intValue(String option) throws CliArgumentsException { try { Number number = (Number) arguments.getParsedOptionValue(option); return number != null ? OptionalInt.of(number.intValue()) : OptionalInt.empty(); } catch (ParseException e) { throw new CliArgumentsException(String.format("Invalid value for '%s': %s", option, e.getMessage()), e); } } private Optional<Path> fileValue(String option) throws CliArgumentsException { try { File certificateFile = (File) arguments.getParsedOptionValue(option); if (certificateFile == null) return Optional.empty(); return Optional.of(certificateFile.toPath()); } catch (ParseException e) { throw new CliArgumentsException(String.format("Invalid value for '%s': %s", option, e.getMessage()), e); } } private boolean has(String option) { return arguments.hasOption(option); } private static Options createOptions() { return new Options() .addOption(Option.builder() .longOpt(HELP_OPTION) .build()) .addOption(Option.builder() .longOpt(VERSION_OPTION) .build()) .addOption(Option.builder() .longOpt(ENDPOINT_OPTION) .hasArg() .type(URL.class) .build()) .addOption(Option.builder() .longOpt(FILE_OPTION) .type(File.class) .hasArg() .build()) .addOption(Option.builder() .longOpt(CONNECTIONS_OPTION) .hasArg() .type(Number.class) .build()) .addOption(Option.builder() .longOpt(MAX_STREAMS_PER_CONNECTION) .hasArg() .type(Number.class) .build()) .addOption(Option.builder() .longOpt(CONNECTIONS_OPTION) .hasArg() .type(Number.class) .build()) .addOption(Option.builder() .longOpt(CERTIFICATE_OPTION) .type(File.class) .hasArg() .build()) .addOption(Option.builder() .longOpt(PRIVATE_KEY_OPTION) .type(File.class) .hasArg() .build()) .addOption(Option.builder() .longOpt(CA_CERTIFICATES_OPTION) .type(File.class) .hasArg() .build()) .addOption(Option.builder() 
.longOpt(DISABLE_SSL_HOSTNAME_VERIFICATION_OPTION) .build()); } void printHelp(OutputStream out) { HelpFormatter formatter = new HelpFormatter(); PrintWriter writer = new PrintWriter(out); formatter.printHelp( writer, formatter.getWidth(), "vespa-feed-client <options>", "Vespa feed client (" + Vespa.VERSION + ")", optionsDefinition, formatter.getLeftPadding(), formatter.getDescPadding(), ""); writer.flush(); } static class CliArgumentsException extends Exception { CliArgumentsException(String message, Throwable cause) { super(message, cause); } CliArgumentsException(Throwable cause) { super(cause.getMessage(), cause); } CliArgumentsException(String message) { super(message); } } static class CertificateAndKey { final Path certificateFile; final Path privateKeyFile; CertificateAndKey(Path certificateFile, Path privateKeyFile) { this.certificateFile = certificateFile; this.privateKeyFile = privateKeyFile; } } }
class CliArguments { private static final Options optionsDefinition = createOptions(); private static final String HELP_OPTION = "help"; private static final String VERSION_OPTION = "version"; private static final String ENDPOINT_OPTION = "endpoint"; private static final String FILE_OPTION = "file"; private static final String CONNECTIONS_OPTION = "connections"; private static final String MAX_STREAMS_PER_CONNECTION = "max-streams-per-connection"; private static final String CERTIFICATE_OPTION = "certificate"; private static final String PRIVATE_KEY_OPTION = "private-key"; private static final String CA_CERTIFICATES_OPTION = "ca-certificates"; private static final String DISABLE_SSL_HOSTNAME_VERIFICATION_OPTION = "disable-ssl-hostname-verification"; private final CommandLine arguments; private CliArguments(CommandLine arguments) { this.arguments = arguments; } static CliArguments fromRawArgs(String[] rawArgs) throws CliArgumentsException { CommandLineParser parser = new DefaultParser(); try { return new CliArguments(parser.parse(optionsDefinition, rawArgs)); } catch (ParseException e) { throw new CliArgumentsException(e); } } URI endpoint() throws CliArgumentsException { try { URL url = (URL) arguments.getParsedOptionValue(ENDPOINT_OPTION); if (url == null) throw new CliArgumentsException("Endpoint must be specified"); return url.toURI(); } catch (ParseException | URISyntaxException e) { throw new CliArgumentsException("Invalid endpoint: " + e.getMessage(), e); } } boolean helpSpecified() { return has(HELP_OPTION); } boolean versionSpecified() { return has(VERSION_OPTION); } OptionalInt connections() throws CliArgumentsException { return intValue(CONNECTIONS_OPTION); } OptionalInt maxStreamsPerConnection() throws CliArgumentsException { return intValue(MAX_STREAMS_PER_CONNECTION); } Optional<Path> caCertificates() throws CliArgumentsException { return fileValue(CA_CERTIFICATES_OPTION); } Path inputFile() throws CliArgumentsException { return fileValue(FILE_OPTION) 
.orElseThrow(() -> new CliArgumentsException("Feed file must be specified")); } boolean sslHostnameVerificationDisabled() { return has(DISABLE_SSL_HOSTNAME_VERIFICATION_OPTION); } private OptionalInt intValue(String option) throws CliArgumentsException { try { Number number = (Number) arguments.getParsedOptionValue(option); return number != null ? OptionalInt.of(number.intValue()) : OptionalInt.empty(); } catch (ParseException e) { throw new CliArgumentsException(String.format("Invalid value for '%s': %s", option, e.getMessage()), e); } } private Optional<Path> fileValue(String option) throws CliArgumentsException { try { File certificateFile = (File) arguments.getParsedOptionValue(option); if (certificateFile == null) return Optional.empty(); return Optional.of(certificateFile.toPath()); } catch (ParseException e) { throw new CliArgumentsException(String.format("Invalid value for '%s': %s", option, e.getMessage()), e); } } private boolean has(String option) { return arguments.hasOption(option); } private static Options createOptions() { return new Options() .addOption(Option.builder() .longOpt(HELP_OPTION) .build()) .addOption(Option.builder() .longOpt(VERSION_OPTION) .build()) .addOption(Option.builder() .longOpt(ENDPOINT_OPTION) .hasArg() .type(URL.class) .build()) .addOption(Option.builder() .longOpt(FILE_OPTION) .type(File.class) .hasArg() .build()) .addOption(Option.builder() .longOpt(CONNECTIONS_OPTION) .hasArg() .type(Number.class) .build()) .addOption(Option.builder() .longOpt(MAX_STREAMS_PER_CONNECTION) .hasArg() .type(Number.class) .build()) .addOption(Option.builder() .longOpt(CONNECTIONS_OPTION) .hasArg() .type(Number.class) .build()) .addOption(Option.builder() .longOpt(CERTIFICATE_OPTION) .type(File.class) .hasArg() .build()) .addOption(Option.builder() .longOpt(PRIVATE_KEY_OPTION) .type(File.class) .hasArg() .build()) .addOption(Option.builder() .longOpt(CA_CERTIFICATES_OPTION) .type(File.class) .hasArg() .build()) .addOption(Option.builder() 
.longOpt(DISABLE_SSL_HOSTNAME_VERIFICATION_OPTION) .build()); } void printHelp(OutputStream out) { HelpFormatter formatter = new HelpFormatter(); PrintWriter writer = new PrintWriter(out); formatter.printHelp( writer, formatter.getWidth(), "vespa-feed-client <options>", "Vespa feed client (" + Vespa.VERSION + ")", optionsDefinition, formatter.getLeftPadding(), formatter.getDescPadding(), ""); writer.flush(); } static class CliArgumentsException extends Exception { CliArgumentsException(String message, Throwable cause) { super(message, cause); } CliArgumentsException(Throwable cause) { super(cause.getMessage(), cause); } CliArgumentsException(String message) { super(message); } } static class CertificateAndKey { final Path certificateFile; final Path privateKeyFile; CertificateAndKey(Path certificateFile, Path privateKeyFile) { this.certificateFile = certificateFile; this.privateKeyFile = privateKeyFile; } } }
> quorumPeer.running should be set to false (under a synchronized(quorumPeer)), which means the quorumPeer thread will eventually exit its thread `quorumPeer.running` is private though, as far as I can tell we can only initiate shutdown through `quorumPeer.shutdown()` > wait until quorumPeer thread exits with quorumPeer.join(), Yes, will fix. > And (2) may happen a long time after we return from this method, meaning it may still run concurrently with trying to start another instance of ZK. Yes, there any many reasons that the old server will linger. Component destruction only happens after 60s for example. This is why server start is retried.
public void shutdown() { if (quorumPeer != null) { try { quorumPeer.shutdown(); } catch (RuntimeException e) { LOG.log(Level.SEVERE, "Failed to shut down properly, forcing restart", e); System.exit(1); } } }
quorumPeer.shutdown();
public void shutdown() { if (quorumPeer != null) { try { quorumPeer.shutdown(); } catch (RuntimeException e) { LOG.log(Level.SEVERE, "Failed to shut down properly, forcing restart", e); System.exit(1); } } }
class VespaQuorumPeer extends QuorumPeerMain { private static final Logger LOG = Logger.getLogger(VespaQuorumPeer.class.getName()); public void start(Path path) { initializeAndRun(new String[]{ path.toFile().getAbsolutePath()}); } @Override protected void initializeAndRun(String[] args) { try { super.initializeAndRun(args); } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) { throw new RuntimeException("Exception when initializing or running ZooKeeper server", e); } } }
class VespaQuorumPeer extends QuorumPeerMain { private static final Logger LOG = Logger.getLogger(VespaQuorumPeer.class.getName()); public void start(Path path) { initializeAndRun(new String[]{ path.toFile().getAbsolutePath()}); } @Override protected void initializeAndRun(String[] args) { try { super.initializeAndRun(args); } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) { throw new RuntimeException("Exception when initializing or running ZooKeeper server", e); } } }
Isn't there also a 50s timeout on shutdown, somewhere further up?
public void deconstruct(List<Object> components, Collection<Bundle> bundles) { Collection<Deconstructable> destructibleComponents = new ArrayList<>(); for (var component : components) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { destructibleComponents.add(abstractComponent); } } else if (component instanceof Provider) { destructibleComponents.add((Deconstructable) component); } else if (component instanceof SharedResource) { log.log(FINE, () -> "Releasing container reference to resource " + component); ((SharedResource) component).release(); } } if (!destructibleComponents.isEmpty() || !bundles.isEmpty()) { var task = executor.schedule(new DestructComponentTask(destructibleComponents, bundles), delay.getSeconds(), TimeUnit.SECONDS); if (mode.equals(Mode.SHUTDOWN)) { try { log.info("Waiting up to " + SHUTDOWN_DECONSTRUCT_TIMEOUT.toSeconds() + " seconds for all components to deconstruct."); task.get(SHUTDOWN_DECONSTRUCT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.info("Interrupted while waiting for component deconstruction to finish."); Thread.currentThread().interrupt(); } catch (ExecutionException e) { log.warning("Component deconstruction threw an exception: " + e.getMessage()); } catch (TimeoutException e) { log.warning("Component deconstruction timed out."); } } } }
log.info("Waiting up to " + SHUTDOWN_DECONSTRUCT_TIMEOUT.toSeconds() + " seconds for all components to deconstruct.");
public void deconstruct(List<Object> components, Collection<Bundle> bundles) { Collection<Deconstructable> destructibleComponents = new ArrayList<>(); for (var component : components) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { destructibleComponents.add(abstractComponent); } } else if (component instanceof Provider) { destructibleComponents.add((Deconstructable) component); } else if (component instanceof SharedResource) { log.log(FINE, () -> "Releasing container reference to resource " + component); ((SharedResource) component).release(); } } if (!destructibleComponents.isEmpty() || !bundles.isEmpty()) { var task = executor.schedule(new DestructComponentTask(destructibleComponents, bundles), delay.getSeconds(), TimeUnit.SECONDS); if (mode.equals(Mode.SHUTDOWN)) { waitFor(task, SHUTDOWN_DECONSTRUCT_TIMEOUT); } } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private static final Duration SHUTDOWN_DECONSTRUCT_TIMEOUT = Duration.ofMinutes(10); public enum Mode { RECONFIG, SHUTDOWN } final ScheduledExecutorService executor = Executors.newScheduledThreadPool(2, ThreadFactoryFactory.getThreadFactory("component-deconstructor")); private final Mode mode; private final Duration delay; public Deconstructor(Mode mode) { this(mode, (mode == Mode.RECONFIG) ? Duration.ofSeconds(60) : Duration.ZERO); } Deconstructor(Mode mode, Duration reconfigDeconstructDelay) { this.mode = mode; this.delay = reconfigDeconstructDelay; } @Override private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.nanoTime()); private final Collection<Deconstructable> components; private final Collection<Bundle> bundles; DestructComponentTask(Collection<Deconstructable> components, Collection<Bundle> bundles) { this.components = components; this.bundles = bundles; } /** * Returns a random delay between 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. */ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { for (var component : components) { log.log(FINE, () -> "Starting deconstruction of " + component); try { component.deconstruct(); log.log(FINE, () -> "Finished deconstructing of " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(Level.SEVERE, "Error when deconstructing component " + component + ". 
Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } for (var bundle : bundles) { try { log.log(INFO, "Uninstalling bundle " + bundle); bundle.uninstall(); } catch (BundleException e) { log.log(SEVERE, "Could not uninstall bundle " + bundle); } } } } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private static final Duration RECONFIG_DECONSTRUCT_DELAY = Duration.ofSeconds(60); private static final Duration SHUTDOWN_DECONSTRUCT_TIMEOUT = Duration.ofSeconds(45); public enum Mode { RECONFIG, SHUTDOWN } private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(2, ThreadFactoryFactory.getThreadFactory("component-deconstructor")); private final Mode mode; private final Duration delay; public Deconstructor(Mode mode) { this(mode, (mode == Mode.RECONFIG) ? RECONFIG_DECONSTRUCT_DELAY : Duration.ZERO); } Deconstructor(Mode mode, Duration reconfigDeconstructDelay) { this.mode = mode; this.delay = reconfigDeconstructDelay; } @Override private void waitFor(ScheduledFuture<?> task, Duration timeout) { try { log.info("Waiting up to " + timeout.toSeconds() + " seconds for all components to deconstruct."); task.get(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.info("Interrupted while waiting for component deconstruction to finish."); Thread.currentThread().interrupt(); } catch (ExecutionException e) { log.warning("Component deconstruction threw an exception: " + e.getMessage()); } catch (TimeoutException e) { log.warning("Component deconstruction timed out."); } } private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.nanoTime()); private final Collection<Deconstructable> components; private final Collection<Bundle> bundles; DestructComponentTask(Collection<Deconstructable> components, Collection<Bundle> bundles) { this.components = components; this.bundles = bundles; } /** * Returns a random delay between 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. 
*/ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { for (var component : components) { log.log(FINE, () -> "Starting deconstruction of " + component); try { component.deconstruct(); log.log(FINE, () -> "Finished deconstructing of " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(Level.SEVERE, "Error when deconstructing component " + component + ". Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } for (var bundle : bundles) { try { log.log(INFO, "Uninstalling bundle " + bundle); bundle.uninstall(); } catch (BundleException e) { log.log(SEVERE, "Could not uninstall bundle " + bundle); } } } } }
Yes, you're right. ~~That seems a bit harsh. Perhaps we should increase the "shutdownDeadlineExecutor" to at least 90 sec, and set this one to 80?~~ I don't know the background for that. Maybe it's to ensure shutdown within a reasonable amount of time. What do you think about 45 seconds? In normal use cases, that's probably plenty.
public void deconstruct(List<Object> components, Collection<Bundle> bundles) { Collection<Deconstructable> destructibleComponents = new ArrayList<>(); for (var component : components) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { destructibleComponents.add(abstractComponent); } } else if (component instanceof Provider) { destructibleComponents.add((Deconstructable) component); } else if (component instanceof SharedResource) { log.log(FINE, () -> "Releasing container reference to resource " + component); ((SharedResource) component).release(); } } if (!destructibleComponents.isEmpty() || !bundles.isEmpty()) { var task = executor.schedule(new DestructComponentTask(destructibleComponents, bundles), delay.getSeconds(), TimeUnit.SECONDS); if (mode.equals(Mode.SHUTDOWN)) { try { log.info("Waiting up to " + SHUTDOWN_DECONSTRUCT_TIMEOUT.toSeconds() + " seconds for all components to deconstruct."); task.get(SHUTDOWN_DECONSTRUCT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.info("Interrupted while waiting for component deconstruction to finish."); Thread.currentThread().interrupt(); } catch (ExecutionException e) { log.warning("Component deconstruction threw an exception: " + e.getMessage()); } catch (TimeoutException e) { log.warning("Component deconstruction timed out."); } } } }
log.info("Waiting up to " + SHUTDOWN_DECONSTRUCT_TIMEOUT.toSeconds() + " seconds for all components to deconstruct.");
public void deconstruct(List<Object> components, Collection<Bundle> bundles) { Collection<Deconstructable> destructibleComponents = new ArrayList<>(); for (var component : components) { if (component instanceof AbstractComponent) { AbstractComponent abstractComponent = (AbstractComponent) component; if (abstractComponent.isDeconstructable()) { destructibleComponents.add(abstractComponent); } } else if (component instanceof Provider) { destructibleComponents.add((Deconstructable) component); } else if (component instanceof SharedResource) { log.log(FINE, () -> "Releasing container reference to resource " + component); ((SharedResource) component).release(); } } if (!destructibleComponents.isEmpty() || !bundles.isEmpty()) { var task = executor.schedule(new DestructComponentTask(destructibleComponents, bundles), delay.getSeconds(), TimeUnit.SECONDS); if (mode.equals(Mode.SHUTDOWN)) { waitFor(task, SHUTDOWN_DECONSTRUCT_TIMEOUT); } } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private static final Duration SHUTDOWN_DECONSTRUCT_TIMEOUT = Duration.ofMinutes(10); public enum Mode { RECONFIG, SHUTDOWN } final ScheduledExecutorService executor = Executors.newScheduledThreadPool(2, ThreadFactoryFactory.getThreadFactory("component-deconstructor")); private final Mode mode; private final Duration delay; public Deconstructor(Mode mode) { this(mode, (mode == Mode.RECONFIG) ? Duration.ofSeconds(60) : Duration.ZERO); } Deconstructor(Mode mode, Duration reconfigDeconstructDelay) { this.mode = mode; this.delay = reconfigDeconstructDelay; } @Override private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.nanoTime()); private final Collection<Deconstructable> components; private final Collection<Bundle> bundles; DestructComponentTask(Collection<Deconstructable> components, Collection<Bundle> bundles) { this.components = components; this.bundles = bundles; } /** * Returns a random delay between 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. */ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { for (var component : components) { log.log(FINE, () -> "Starting deconstruction of " + component); try { component.deconstruct(); log.log(FINE, () -> "Finished deconstructing of " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(Level.SEVERE, "Error when deconstructing component " + component + ". 
Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } for (var bundle : bundles) { try { log.log(INFO, "Uninstalling bundle " + bundle); bundle.uninstall(); } catch (BundleException e) { log.log(SEVERE, "Could not uninstall bundle " + bundle); } } } } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private static final Duration RECONFIG_DECONSTRUCT_DELAY = Duration.ofSeconds(60); private static final Duration SHUTDOWN_DECONSTRUCT_TIMEOUT = Duration.ofSeconds(45); public enum Mode { RECONFIG, SHUTDOWN } private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(2, ThreadFactoryFactory.getThreadFactory("component-deconstructor")); private final Mode mode; private final Duration delay; public Deconstructor(Mode mode) { this(mode, (mode == Mode.RECONFIG) ? RECONFIG_DECONSTRUCT_DELAY : Duration.ZERO); } Deconstructor(Mode mode, Duration reconfigDeconstructDelay) { this.mode = mode; this.delay = reconfigDeconstructDelay; } @Override private void waitFor(ScheduledFuture<?> task, Duration timeout) { try { log.info("Waiting up to " + timeout.toSeconds() + " seconds for all components to deconstruct."); task.get(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.info("Interrupted while waiting for component deconstruction to finish."); Thread.currentThread().interrupt(); } catch (ExecutionException e) { log.warning("Component deconstruction threw an exception: " + e.getMessage()); } catch (TimeoutException e) { log.warning("Component deconstruction timed out."); } } private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.nanoTime()); private final Collection<Deconstructable> components; private final Collection<Bundle> bundles; DestructComponentTask(Collection<Deconstructable> components, Collection<Bundle> bundles) { this.components = components; this.bundles = bundles; } /** * Returns a random delay between 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. 
*/ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { for (var component : components) { log.log(FINE, () -> "Starting deconstruction of " + component); try { component.deconstruct(); log.log(FINE, () -> "Finished deconstructing of " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(Level.SEVERE, "Error when deconstructing component " + component + ". Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } for (var bundle : bundles) { try { log.log(INFO, "Uninstalling bundle " + bundle); bundle.uninstall(); } catch (BundleException e) { log.log(SEVERE, "Could not uninstall bundle " + bundle); } } } } }
Fixed
private void loadLocalSessions(ExecutorService executor) { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; List<Future<Long>> futures = new ArrayList<>(); for (File session : sessions) { long sessionId = Long.parseLong(session.getName()); futures.add(executor.submit(() -> createSessionFromId(sessionId))); } futures.forEach(f -> { try { long sessionId = f.get(); log.log(Level.INFO, () -> "Local session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session"); } }); }
log.log(Level.WARNING, "Could not load session");
private void loadLocalSessions(ExecutorService executor) { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; Map<Long, Future<?>> futures = new HashMap<>(); for (File session : sessions) { long sessionId = Long.parseLong(session.getName()); futures.put(sessionId, executor.submit(() -> createSessionFromId(sessionId))); } futures.forEach((sessionId, future) -> { try { future.get(); log.log(Level.INFO, () -> "Local session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session " + sessionId, e); } }); }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final ConfigCurator configCurator; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final TenantListener tenantListener; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, PermanentApplicationPackage permanentApplicationPackage, FlagSource flagSource, 
ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, TenantListener tenantListener) { this.tenantName = tenantName; this.configCurator = ConfigCurator.create(curator); sessionCounter = new SessionCounter(configCurator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.tenantListener = tenantListener; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("load-sessions-")); loadLocalSessions(executor); loadRemoteSessions(executor); try { executor.shutdown(); if ( ! 
executor.awaitTermination(1, TimeUnit.MINUTES)) log.log(Level.INFO, "Executor did not terminate"); } catch (InterruptedException e) { e.printStackTrace(); } } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } public Collection<LocalSession> getLocalSessions() { return localSessionCache.values(); } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { 
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException { List<Future<Long>> futures = new ArrayList<>(); for (long sessionId : getRemoteSessionsFromZooKeeper()) { futures.add(executor.submit(() -> sessionAdded(sessionId))); } futures.forEach(f -> { try { long sessionId = f.get(); log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session"); } }); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
* * @param sessionId session id for the new session */ public long sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return sessionId; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); return sessionId; } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void delete(Session remoteSession) { long sessionId = remoteSession.getSessionId(); log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId); createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit(); deleteRemoteSessionFromZooKeeper(remoteSession); remoteSessionCache.remove(sessionId); LocalSession localSession = getLocalSession(sessionId); if (localSession != null) { log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId); deleteLocalSession(localSession); } } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) 
{ if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); Set<LocalSession> toDelete = new HashSet<>(); try { for (LocalSession candidate : localSessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { toDelete.add(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { toDelete.add(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } toDelete.forEach(this::deleteLocalSession); } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } private void 
ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (configCurator.exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { 
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
*/ long createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); return sessionId; } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. * Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); 
createLocalSession(sessionDir, applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = configserverConfig.serverId(); return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { 
deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for 
file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final ConfigCurator configCurator; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final TenantListener tenantListener; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, PermanentApplicationPackage permanentApplicationPackage, FlagSource flagSource, 
ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, TenantListener tenantListener) { this.tenantName = tenantName; this.configCurator = ConfigCurator.create(curator); sessionCounter = new SessionCounter(configCurator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.tenantListener = tenantListener; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("load-sessions-")); loadLocalSessions(executor); loadRemoteSessions(executor); try { executor.shutdown(); if ( ! 
executor.awaitTermination(1, TimeUnit.MINUTES)) log.log(Level.INFO, "Executor did not terminate"); } catch (InterruptedException e) { log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e)); } } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } public Collection<LocalSession> getLocalSessions() { return localSessionCache.values(); } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { 
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException { Map<Long, Future<?>> futures = new HashMap<>(); for (long sessionId : getRemoteSessionsFromZooKeeper()) { futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId))); } futures.forEach((sessionId, future) -> { try { future.get(); log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session " + sessionId, e); } }); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
* * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void delete(Session remoteSession) { long sessionId = remoteSession.getSessionId(); log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId); createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit(); deleteRemoteSessionFromZooKeeper(remoteSession); remoteSessionCache.remove(sessionId); LocalSession localSession = getLocalSession(sessionId); if (localSession != null) { log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId); deleteLocalSession(localSession); } } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if 
(applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); Set<LocalSession> toDelete = new HashSet<>(); try { for (LocalSession candidate : localSessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { toDelete.add(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { toDelete.add(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } toDelete.forEach(this::deleteLocalSession); } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } private void 
ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (configCurator.exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { 
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
*/ void createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. * Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); createLocalSession(sessionDir, 
applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = configserverConfig.serverId(); return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { deleteAllSessions(); 
tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ 
private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
I think 45 seconds is a good value for this delay.
/**
 * Deconstructs the given components and uninstalls the given bundles, asynchronously and after
 * the configured delay. Shared resources are released immediately. In SHUTDOWN mode this method
 * blocks until deconstruction finishes or the shutdown timeout elapses.
 */
public void deconstruct(List<Object> components, Collection<Bundle> bundles) {
    // Gather everything that needs an explicit deconstruct call; release shared resources right away.
    Collection<Deconstructable> toDeconstruct = new ArrayList<>();
    for (var candidate : components) {
        if (candidate instanceof AbstractComponent) {
            AbstractComponent abstractComponent = (AbstractComponent) candidate;
            if (abstractComponent.isDeconstructable())
                toDeconstruct.add(abstractComponent);
        } else if (candidate instanceof Provider) {
            // Providers are always deconstructed, without any isDeconstructable check.
            toDeconstruct.add((Deconstructable) candidate);
        } else if (candidate instanceof SharedResource) {
            log.log(FINE, () -> "Releasing container reference to resource " + candidate);
            ((SharedResource) candidate).release();
        }
    }

    if (toDeconstruct.isEmpty() && bundles.isEmpty()) return;

    var task = executor.schedule(new DestructComponentTask(toDeconstruct, bundles),
                                 delay.getSeconds(), TimeUnit.SECONDS);
    if ( ! mode.equals(Mode.SHUTDOWN)) return;

    // On container shutdown, wait (bounded) so components get a chance to finish cleanly.
    try {
        log.info("Waiting up to " + SHUTDOWN_DECONSTRUCT_TIMEOUT.toSeconds() + " seconds for all components to deconstruct.");
        task.get(SHUTDOWN_DECONSTRUCT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        log.info("Interrupted while waiting for component deconstruction to finish.");
        Thread.currentThread().interrupt();  // restore interrupt status
    } catch (ExecutionException e) {
        log.warning("Component deconstruction threw an exception: " + e.getMessage());
    } catch (TimeoutException e) {
        log.warning("Component deconstruction timed out.");
    }
}
log.info("Waiting up to " + SHUTDOWN_DECONSTRUCT_TIMEOUT.toSeconds() + " seconds for all components to deconstruct.");
/**
 * Deconstructs the given components and uninstalls the given bundles, asynchronously and after
 * the configured delay. Shared resources are released immediately, without scheduling.
 *
 * @param components component instances from the old generation; only AbstractComponents that
 *                   report isDeconstructable(), Providers, and SharedResources are acted on
 * @param bundles    bundles to uninstall once the components have been deconstructed
 */
public void deconstruct(List<Object> components, Collection<Bundle> bundles) {
    Collection<Deconstructable> destructibleComponents = new ArrayList<>();
    for (var component : components) {
        if (component instanceof AbstractComponent) {
            AbstractComponent abstractComponent = (AbstractComponent) component;
            if (abstractComponent.isDeconstructable()) {
                destructibleComponents.add(abstractComponent);
            }
        } else if (component instanceof Provider) {
            // Providers are always deconstructed — no isDeconstructable check, unlike AbstractComponent.
            destructibleComponents.add((Deconstructable) component);
        } else if (component instanceof SharedResource) {
            // Shared resources are reference-counted; just drop the container's reference here.
            log.log(FINE, () -> "Releasing container reference to resource " + component);
            ((SharedResource) component).release();
        }
    }
    if (!destructibleComponents.isEmpty() || !bundles.isEmpty()) {
        // Schedule the actual teardown after the configured delay (zero in SHUTDOWN mode).
        var task = executor.schedule(new DestructComponentTask(destructibleComponents, bundles),
                                     delay.getSeconds(), TimeUnit.SECONDS);
        if (mode.equals(Mode.SHUTDOWN)) {
            // waitFor is defined elsewhere — presumably blocks until the task completes or the
            // timeout elapses; NOTE(review): confirm its timeout/interruption semantics.
            waitFor(task, SHUTDOWN_DECONSTRUCT_TIMEOUT);
        }
    }
}
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private static final Duration SHUTDOWN_DECONSTRUCT_TIMEOUT = Duration.ofMinutes(10); public enum Mode { RECONFIG, SHUTDOWN } final ScheduledExecutorService executor = Executors.newScheduledThreadPool(2, ThreadFactoryFactory.getThreadFactory("component-deconstructor")); private final Mode mode; private final Duration delay; public Deconstructor(Mode mode) { this(mode, (mode == Mode.RECONFIG) ? Duration.ofSeconds(60) : Duration.ZERO); } Deconstructor(Mode mode, Duration reconfigDeconstructDelay) { this.mode = mode; this.delay = reconfigDeconstructDelay; } @Override private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.nanoTime()); private final Collection<Deconstructable> components; private final Collection<Bundle> bundles; DestructComponentTask(Collection<Deconstructable> components, Collection<Bundle> bundles) { this.components = components; this.bundles = bundles; } /** * Returns a random delay between 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. */ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { for (var component : components) { log.log(FINE, () -> "Starting deconstruction of " + component); try { component.deconstruct(); log.log(FINE, () -> "Finished deconstructing of " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(Level.SEVERE, "Error when deconstructing component " + component + ". 
Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } for (var bundle : bundles) { try { log.log(INFO, "Uninstalling bundle " + bundle); bundle.uninstall(); } catch (BundleException e) { log.log(SEVERE, "Could not uninstall bundle " + bundle); } } } } }
class Deconstructor implements ComponentDeconstructor { private static final Logger log = Logger.getLogger(Deconstructor.class.getName()); private static final Duration RECONFIG_DECONSTRUCT_DELAY = Duration.ofSeconds(60); private static final Duration SHUTDOWN_DECONSTRUCT_TIMEOUT = Duration.ofSeconds(45); public enum Mode { RECONFIG, SHUTDOWN } private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(2, ThreadFactoryFactory.getThreadFactory("component-deconstructor")); private final Mode mode; private final Duration delay; public Deconstructor(Mode mode) { this(mode, (mode == Mode.RECONFIG) ? RECONFIG_DECONSTRUCT_DELAY : Duration.ZERO); } Deconstructor(Mode mode, Duration reconfigDeconstructDelay) { this.mode = mode; this.delay = reconfigDeconstructDelay; } @Override private void waitFor(ScheduledFuture<?> task, Duration timeout) { try { log.info("Waiting up to " + timeout.toSeconds() + " seconds for all components to deconstruct."); task.get(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.info("Interrupted while waiting for component deconstruction to finish."); Thread.currentThread().interrupt(); } catch (ExecutionException e) { log.warning("Component deconstruction threw an exception: " + e.getMessage()); } catch (TimeoutException e) { log.warning("Component deconstruction timed out."); } } private static class DestructComponentTask implements Runnable { private final Random random = new Random(System.nanoTime()); private final Collection<Deconstructable> components; private final Collection<Bundle> bundles; DestructComponentTask(Collection<Deconstructable> components, Collection<Bundle> bundles) { this.components = components; this.bundles = bundles; } /** * Returns a random delay between 0 and 10 minutes which will be different across identical containers invoking this at the same time. * Used to randomize restart to avoid simultaneous cluster restarts. 
*/ private Duration getRandomizedShutdownDelay() { long seconds = (long) random.nextDouble() * 60 * 10; return Duration.ofSeconds(seconds); } @Override public void run() { for (var component : components) { log.log(FINE, () -> "Starting deconstruction of " + component); try { component.deconstruct(); log.log(FINE, () -> "Finished deconstructing of " + component); } catch (Exception | NoClassDefFoundError e) { log.log(WARNING, "Exception thrown when deconstructing component " + component, e); } catch (Error e) { try { Duration shutdownDelay = getRandomizedShutdownDelay(); log.log(Level.SEVERE, "Error when deconstructing component " + component + ". Will sleep for " + shutdownDelay.getSeconds() + " seconds then restart", e); Thread.sleep(shutdownDelay.toMillis()); } catch (InterruptedException exception) { log.log(WARNING, "Randomized wait before dying disrupted. Dying now."); } com.yahoo.protect.Process.logAndDie("Shutting down due to error when deconstructing component " + component); } catch (Throwable e) { log.log(WARNING, "Non-error not exception throwable thrown when deconstructing component " + component, e); } } for (var bundle : bundles) { try { log.log(INFO, "Uninstalling bundle " + bundle); bundle.uninstall(); } catch (BundleException e) { log.log(SEVERE, "Could not uninstall bundle " + bundle); } } } } }
Only these test jobs will be triggered automatically. Undeclared jobs may still be triggered by users, though that likely won't happen if they aren't shown here :)
/** The status of all declared steps, plus the first (declared or implicit) system and staging test steps. */
public List<StepStatus> allSteps() {
    JobId firstSystemTest = firstDeclaredOrElseImplicitTest(systemTest);
    JobId firstStagingTest = firstDeclaredOrElseImplicitTest(stagingTest);

    List<StepStatus> relevant = new ArrayList<>();
    for (StepStatus step : allSteps) {
        if ( ! step.isDeclared()) {
            // Implicit steps are hidden, except the first system and staging tests.
            JobId job = step.job().orElseThrow();
            if ( ! job.equals(firstSystemTest) && ! job.equals(firstStagingTest))
                continue;
        }
        relevant.add(step);
    }
    return List.copyOf(relevant);
}
List<JobId> firstTestJobs = List.of(firstDeclaredOrElseImplicitTest(systemTest),
/** The status of all declared steps, plus the first (declared or implicit) system and staging test steps. */
public List<StepStatus> allSteps() {
    // Undeclared (implicit) test steps are filtered out, except the first system and staging test:
    // only those are triggered automatically, so only those are worth showing.
    // NOTE(review): assumes every undeclared step has a job — job().orElseThrow() would fail otherwise.
    List<JobId> firstTestJobs = List.of(firstDeclaredOrElseImplicitTest(systemTest),
                                        firstDeclaredOrElseImplicitTest(stagingTest));
    return allSteps.stream()
                   .filter(step -> step.isDeclared() || firstTestJobs.contains(step.job().orElseThrow()))
                   .collect(toUnmodifiableList());
}
/**
 * Status of the deployment jobs of an application: which jobs and steps its deployment spec implies,
 * which of them are complete for a given change, and which still need to run.
 * Immutable snapshot computed at construction from the application, its job statuses, and "now".
 */
class DeploymentStatus {

    /** All jobs — the system and staging tests, plus production jobs derived from the spec — for the given application. */
    public static List<JobId> jobsFor(Application application, SystemName system) {
        if (DeploymentSpec.empty.equals(application.deploymentSpec()))
            return List.of();

        return application.deploymentSpec().instances().stream()
                          .flatMap(spec -> Stream.concat(Stream.of(systemTest, stagingTest),
                                                         flatten(spec).filter(step -> step.concerns(prod))
                                                                      .map(step -> {
                                                                          if (step instanceof DeclaredZone)
                                                                              return JobType.from(system, prod, ((DeclaredZone) step).region().get());
                                                                          return JobType.testFrom(system, ((DeclaredTest) step).region());
                                                                      })
                                                                      // Drop step types which have no job in this system.
                                                                      .flatMap(Optional::stream))
                                                 .map(type -> new JobId(application.id().instance(spec.name()), type)))
                          .collect(toUnmodifiableList());
    }

    /** Recursively expands nested step groups into the primitive steps they contain. */
    private static Stream<DeploymentSpec.Step> flatten(DeploymentSpec.Step step) {
        return step instanceof DeploymentSpec.Steps ? step.steps().stream().flatMap(DeploymentStatus::flatten) : Stream.of(step);
    }

    /** Concatenation of the two lists, without duplicates, preserving first-occurrence order. */
    private static <T> List<T> union(List<T> first, List<T> second) {
        return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList());
    }

    private final Application application;
    private final JobList allJobs;                  // status of all known job runs, across instances
    private final SystemName system;
    private final Version systemVersion;            // current platform version of the system
    private final Instant now;
    private final Map<JobId, StepStatus> jobSteps;  // job-backed steps, in declaration order
    private final List<StepStatus> allSteps;        // every step, including delays and instances

    public DeploymentStatus(Application application, Map<JobId, JobStatus> allJobs, SystemName system,
                            Version systemVersion, Instant now) {
        this.application = requireNonNull(application);
        this.allJobs = JobList.from(allJobs.values());
        this.system = requireNonNull(system);
        this.systemVersion = requireNonNull(systemVersion);
        this.now = requireNonNull(now);
        List<StepStatus> allSteps = new ArrayList<>();
        // jobDependencies fills allSteps as a side effect while building the job → step map.
        this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps);
        this.allSteps = List.copyOf(allSteps);
    }

    /** The application this deployment status concerns. */
    public Application application() { return application; }

    /** A filterable list of the status of all jobs for this application. */
    public JobList jobs() { return allJobs; }

    /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */
    public boolean hasFailures() {
        return ! allJobs.failing()
                        .not().withStatus(RunStatus.outOfCapacity)
                        .isEmpty();
    }

    /** All job statuses, by job type, for the given instance. */
    public Map<JobType, JobStatus> instanceJobs(InstanceName instance) {
        return allJobs.asList().stream()
                      .filter(job -> job.id().application().equals(application.id().instance(instance)))
                      .collect(Collectors.toUnmodifiableMap(job -> job.id().type(),
                                                            job -> job));
    }

    /** Filterable job status lists for each instance of this application. */
    public Map<ApplicationId, JobList> instanceJobs() {
        return allJobs.asList().stream()
                      .collect(groupingBy(job -> job.id().application(),
                                          collectingAndThen(toUnmodifiableList(), JobList::from)));
    }

    /**
     * The set of jobs that need to run for the changes of each instance of the application to be considered complete,
     * and any test jobs for any outstanding change, which will likely be needed to later deploy this change.
     */
    public Map<JobId, List<Versions>> jobsToRun() {
        Map<InstanceName, Change> changes = new LinkedHashMap<>();
        for (InstanceName instance : application.deploymentSpec().instanceNames())
            changes.put(instance, application.require(instance).change());
        Map<JobId, List<Versions>> jobs = jobsToRun(changes);

        // Recompute with any outstanding change stacked on top, keeping only the non-production (test) jobs.
        for (InstanceName instance : application.deploymentSpec().instanceNames())
            changes.put(instance, outstandingChange(instance).onTopOf(application.require(instance).change()));
        var testJobs = jobsToRun(changes, true).entrySet().stream()
                                               .filter(entry -> ! entry.getKey().type().isProduction());

        return Stream.concat(jobs.entrySet().stream(), testJobs)
                     .collect(collectingAndThen(toMap(Map.Entry::getKey,
                                                      Map.Entry::getValue,
                                                      DeploymentStatus::union,
                                                      LinkedHashMap::new),
                                                ImmutableMap::copyOf));
    }

    /**
     * Computes the jobs to run for the given changes: production jobs not yet complete, the test jobs
     * required before them, and declared tests in instances where the change is not yet complete.
     * When {@code eagerTests} is set, existing deployments are treated as if any platform upgrade
     * had already succeeded (see productionJobs).
     */
    private Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) {
        Map<JobId, Versions> productionJobs = new LinkedHashMap<>();
        changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests)));
        Map<JobId, List<Versions>> testJobs = testJobs(productionJobs);
        Map<JobId, List<Versions>> jobs = new LinkedHashMap<>(testJobs);
        productionJobs.forEach((job, versions) -> jobs.put(job, List.of(versions)));
        // Add declared jobs not yet covered, for instances whose change is incomplete.
        jobSteps.forEach((job, step) -> {
            if ( ! step.isDeclared() || jobs.containsKey(job))
                return;
            Change change = changes.get(job.application().instance());
            if (change == null || ! change.hasTargets())
                return;
            // Versions are taken from the first production job which already has a deployment, if any.
            Optional<JobId> firstProductionJobWithDeployment = jobSteps.keySet().stream()
                                                                       .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment())
                                                                       .filter(jobId -> deploymentFor(jobId).isPresent())
                                                                       .findFirst();
            Versions versions = Versions.from(change, application, firstProductionJobWithDeployment.flatMap(this::deploymentFor), systemVersion);
            if (step.completedAt(change, firstProductionJobWithDeployment).isEmpty())
                jobs.merge(job, List.of(versions), DeploymentStatus::union);
        });
        return ImmutableMap.copyOf(jobs);
    }

    /** The set of jobs that need to run for the given changes to be considered complete. */
    public Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes) {
        return jobsToRun(changes, false);
    }

    /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. */
    public Map<JobId, StepStatus> jobSteps() { return jobSteps; }

    /** The status of each instance step, by instance name, in declaration order. */
    public Map<InstanceName, StepStatus> instanceSteps() {
        ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder();
        for (StepStatus status : allSteps)
            if (status instanceof InstanceStatus)
                instances.put(status.instance(), status);
        return instances.build();
    }

    /** The current deployment, if any, in the zone of the given job's type, for the given job's instance. */
    public Optional<Deployment> deploymentFor(JobId job) {
        return Optional.ofNullable(application.require(job.application().instance())
                                              .deployments().get(job.type().zone(system)));
    }

    /**
     * The change of this application's latest submission, if this upgrades any of its production deployments,
     * and has not yet started rolling out, due to some other change or a block window being present at the time of submission.
     */
    public Change outstandingChange(InstanceName instance) {
        return application.latestVersion().map(Change::of)
                          .filter(change -> application.require(instance).change().application().map(change::upgrades).orElse(true))
                          .filter(change -> ! jobsToRun(Map.of(instance, change)).isEmpty())
                          .orElse(Change.empty());
    }

    /**
     * True if the job has already been triggered on the given versions, or if all test types (systemTest, stagingTest),
     * restricted to the job's instance if declared in that instance, have successful runs on the given versions.
     */
    public boolean isTested(JobId job, Change change) {
        Versions versions = Versions.from(change, application, deploymentFor(job), systemVersion);
        return    allJobs.triggeredOn(versions).get(job).isPresent()
               || Stream.of(systemTest, stagingTest)
                        .noneMatch(testType -> declaredTest(job.application(), testType).map(__ -> allJobs.instance(job.application().instance()))
                                                                                        .orElse(allJobs)
                                                                                        .type(testType)
                                                                                        .successOn(versions).isEmpty());
    }

    /**
     * The production jobs of the given instance which are not yet complete for the given change.
     * When {@code assumeUpgradesSucceed} is set, existing deployments are viewed as if the change's
     * platform upgrade had already been applied.
     */
    private Map<JobId, Versions> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) {
        ImmutableMap.Builder<JobId, Versions> jobs = ImmutableMap.builder();
        jobSteps.forEach((job, step) -> {
            Optional<Deployment> deployment = deploymentFor(job)
                    .map(existing -> assumeUpgradesSucceed ? new Deployment(existing.zone(),
                                                                            existing.applicationVersion(),
                                                                            change.platform().orElse(existing.version()),
                                                                            existing.at(),
                                                                            existing.metrics(),
                                                                            existing.activity(),
                                                                            existing.quota())
                                                           : existing);
            if (   job.application().instance().equals(instance)
                && job.type().isProduction()
                && step.completedAt(change).isEmpty())
                jobs.put(job, Versions.from(change, application, deployment, systemVersion));
        });
        return jobs.build();
    }

    /** The production jobs that need to run to complete roll-out of the given change to production. */
    public Map<JobId, Versions> productionJobs(InstanceName instance, Change change) {
        return productionJobs(instance, change, false);
    }

    /** The test jobs that need to run prior to the given production deployment jobs. */
    public Map<JobId, List<Versions>> testJobs(Map<JobId, Versions> jobs) {
        Map<JobId, List<Versions>> testJobs = new LinkedHashMap<>();
        for (JobType testType : List.of(systemTest, stagingTest)) {
            // First pass: use a test declared in the same instance as the production job, if present.
            jobs.forEach((job, versions) -> {
                if (job.type().isProduction() && job.type().isDeployment()) {
                    declaredTest(job.application(), testType).ifPresent(testJob -> {
                        if (allJobs.successOn(versions).get(testJob).isEmpty())
                            testJobs.merge(testJob, List.of(versions), DeploymentStatus::union);
                    });
                }
            });
            // Second pass: for versions still untested anywhere, fall back to the first declared-or-implicit test.
            jobs.forEach((job, versions) -> {
                if (   job.type().isProduction() && job.type().isDeployment()
                    && allJobs.successOn(versions).type(testType).isEmpty()
                    && testJobs.keySet().stream()
                               .noneMatch(test ->    test.type() == testType
                                                  && testJobs.get(test).contains(versions)))
                    testJobs.merge(firstDeclaredOrElseImplicitTest(testType), List.of(versions), DeploymentStatus::union);
            });
        }
        return ImmutableMap.copyOf(testJobs);
    }

    /** The first declared test of the given type, across instances, or the first implicit one if none is declared. */
    private JobId firstDeclaredOrElseImplicitTest(JobType testJob) {
        return application.deploymentSpec().instanceNames().stream()
                          .map(name -> new JobId(application.id().instance(name), testJob))
                          // min by !isDeclared() sorts declared tests (false) ahead of implicit ones (true).
                          .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow();
    }

    /** JobId of any declared test of the given type, for the given instance. */
    private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) {
        JobId jobId = new JobId(instanceId, testJob);
        return jobSteps.get(jobId).isDeclared() ? Optional.of(jobId) : Optional.empty();
    }

    /** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. */
    private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps) {
        if (DeploymentSpec.empty.equals(spec))
            return Map.of();

        Map<JobId, StepStatus> dependencies = new LinkedHashMap<>();
        List<StepStatus> previous = List.of();
        for (DeploymentSpec.Step step : spec.steps())
            // null instance: we are above any instance declaration at this point.
            previous = fillStep(dependencies, allSteps, step, previous, null);

        return ImmutableMap.copyOf(dependencies);
    }

    /** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */
    private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps,
                                     DeploymentSpec.Step step, List<StepStatus> previous, InstanceName instance) {
        // Leaf step: a delay, or a job of some kind.
        if (step.steps().isEmpty()) {
            if (instance == null)
                return previous;  // not in an instance; nothing to fill

            if ( ! step.delay().isZero()) {
                StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance);
                allSteps.add(stepStatus);
                return List.of(stepStatus);
            }

            JobType jobType;
            StepStatus stepStatus;
            if (step.concerns(test) || step.concerns(staging)) {
                jobType = JobType.from(system, ((DeclaredZone) step).environment(), null)
                                 .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, instance, jobType, true);
                // Test steps add to, rather than replace, the dependency frontier.
                previous = new ArrayList<>(previous);
                previous.add(stepStatus);
            }
            else if (step.isTest()) {
                jobType = JobType.testFrom(system, ((DeclaredTest) step).region())
                                 .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                JobType preType = JobType.from(system, prod, ((DeclaredTest) step).region())
                                         .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, instance, jobType, preType);
                previous = List.of(stepStatus);
            }
            else if (step.concerns(prod)) {
                jobType = JobType.from(system, ((DeclaredZone) step).environment(), ((DeclaredZone) step).region().get())
                                 .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, instance, jobType);
                previous = List.of(stepStatus);
            }
            else return previous;  // not a job

            JobId jobId = new JobId(application.id().instance(instance), jobType);
            // A declared step replaces any earlier (implicit) step for the same job.
            allSteps.removeIf(existing -> existing.job().equals(Optional.of(jobId)));
            allSteps.add(stepStatus);
            dependencies.put(jobId, stepStatus);
            return previous;
        }

        if (step instanceof DeploymentInstanceSpec) {
            DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step);
            StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this);
            instance = spec.name();
            allSteps.add(instanceStatus);
            previous = List.of(instanceStatus);
            // Ensure implicit system and staging tests exist for the instance; a later declared
            // test replaces the implicit one (see removeIf above).
            for (JobType test : List.of(systemTest, stagingTest)) {
                JobId job = new JobId(application.id().instance(instance), test);
                if ( ! dependencies.containsKey(job)) {
                    var testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test.environment()), List.of(),
                                                                    this, job.application().instance(), test, false);
                    dependencies.put(job, testStatus);
                    allSteps.add(testStatus);
                }
            }
        }

        // Serial group: each nested step depends on the previous one.
        if (step.isOrdered()) {
            for (DeploymentSpec.Step nested : step.steps())
                previous = fillStep(dependencies, allSteps, nested, previous, instance);
            return previous;
        }

        // Parallel group: all nested steps depend on the same previous steps.
        List<StepStatus> parallel = new ArrayList<>();
        for (DeploymentSpec.Step nested : step.steps())
            parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance));
        return List.copyOf(parallel);
    }

    public enum StepType {

        /** An instance — completion marks a change as ready for the jobs contained in it. */
        instance,

        /** A timed delay. */
        delay,

        /** A system, staging or production test. */
        test,

        /** A production deployment. */
        deployment,

    }

    /**
     * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change.
     *
     * Each node contains a step describing the node,
     * a list of steps which need to be complete before the step may start,
     * a list of jobs from which completion of the step is computed, and
     * optionally, an instance name used to identify a job type for the step,
     *
     * The completion criterion for each type of step is implemented in subclasses of this.
     */
    public static abstract class StepStatus {

        private final StepType type;
        private final DeploymentSpec.Step step;
        private final List<StepStatus> dependencies;
        private final InstanceName instance;

        private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies,
                           InstanceName instance) {
            this.type = requireNonNull(type);
            this.step = requireNonNull(step);
            this.dependencies = List.copyOf(dependencies);
            this.instance = instance;
        }

        /** The type of step this is. */
        public final StepType type() { return type; }

        /** The step defining this. */
        public final DeploymentSpec.Step step() { return step; }

        /** The list of steps that need to be complete before this may start. */
        public final List<StepStatus> dependencies() { return dependencies; }

        /** The instance of this. */
        public final InstanceName instance() { return instance; }

        /** The id of the job this corresponds to, if any. */
        public Optional<JobId> job() { return Optional.empty(); }

        /** The time at which this is, or was, complete on the given change and / or versions. */
        public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); }

        /** The time at which this is, or was, complete on the given change and / or versions. */
        abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent);

        /** The time at which this step is ready to run the specified change and / or versions. */
        public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); }

        /** The time at which this step is ready to run the specified change and / or versions. */
        Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
            // Ready when dependencies are done, pushed out by any block window, pause, or cooldown.
            return dependenciesCompletedAt(change, dependent)
                    .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change))
                                        .flatMap(Optional::stream)
                                        .reduce(ready, maxBy(naturalOrder())));
        }

        /** The time at which all dependencies completed on the given change and / or versions. */
        Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) {
            return dependencies.stream().allMatch(step -> step.completedAt(change, dependent).isPresent())
                   ? dependencies.stream().map(step -> step.completedAt(change, dependent).get())
                                 .max(naturalOrder())
                                 .or(() -> Optional.of(Instant.EPOCH))  // no dependencies: complete since forever
                   : Optional.empty();
        }

        /** The time until which this step is blocked by a change blocker. */
        public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); }

        /** The time until which this step is paused by user intervention. */
        public Optional<Instant> pausedUntil() { return Optional.empty(); }

        /** The time until which this step is cooling down, due to consecutive failures. */
        public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); }

        /** Whether this step is declared in the deployment spec, or is an implicit step. */
        public boolean isDeclared() { return true; }

    }

    /** Completes when its dependencies are done, plus the configured delay. */
    private static class DelayStatus extends StepStatus {

        private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) {
            super(StepType.delay, step, dependencies, instance);
        }

        @Override
        public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
            return readyAt(change, dependent).map(completion -> completion.plus(step().delay()));
        }

    }

    private static class InstanceStatus extends StepStatus {

        private final DeploymentInstanceSpec spec;
        private final Instant now;
        private final Instance instance;
        private final DeploymentStatus status;

        private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now,
                               Instance instance, DeploymentStatus status) {
            super(StepType.instance, spec, dependencies, spec.name());
            this.spec = spec;
            this.now = now;
            this.instance = instance;
            this.status = status;
        }

        /**
         * Time of completion of its dependencies, if all parts of the given change are contained in the change
         * for this instance, or if no more jobs should run for this instance for the given change.
         */
        @Override
        public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
            return    (   (change.platform().isEmpty() || change.platform().equals(instance.change().platform()))
                       && (change.application().isEmpty() || change.application().equals(instance.change().application()))
                   ||  status.jobsToRun(Map.of(instance.name(), change)).isEmpty())
                   ? dependenciesCompletedAt(change, dependent)
                   : Optional.empty();
        }

        @Override
        public Optional<Instant> blockedUntil(Change change) {
            // Scan up to a week ahead, hour by hour, for the first time not covered by a matching block window.
            for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) {
                boolean blocked = false;
                for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) {
                    while (   blocker.window().includes(current)
                           && now.plus(Duration.ofDays(7)).isAfter(current)
                           && (   change.platform().isPresent() && blocker.blocksVersions()
                               || change.application().isPresent() && blocker.blocksRevisions())) {
                        blocked = true;
                        current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS);
                    }
                }
                if ( ! blocked)
                    // Reference comparison is deliberate: 'current' still being the 'now' object means no
                    // block was ever in effect. NOTE(review): fragile — equals() would be safer here.
                    return current == now ? Optional.empty() : Optional.of(current);
            }
            return Optional.of(now.plusSeconds(1 << 30));  // ~34 years: effectively blocked indefinitely
        }

    }

    private static abstract class JobStepStatus extends StepStatus {

        private final JobStatus job;
        private final DeploymentStatus status;

        private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job,
                              DeploymentStatus status) {
            super(type, step, dependencies, job.id().application().instance());
            this.job = requireNonNull(job);
            this.status = requireNonNull(status);
        }

        @Override
        public Optional<JobId> job() { return Optional.of(job.id()); }

        @Override
        public Optional<Instant> pausedUntil() {
            return status.application().require(job.id().application().instance()).jobPause(job.id().type());
        }

        @Override
        public Optional<Instant> coolingDownUntil(Change change) {
            // Only cool down a job which has failed on exactly the targets of this change.
            if (job.lastTriggered().isEmpty()) return Optional.empty();
            if (job.lastCompleted().isEmpty()) return Optional.empty();
            if (job.firstFailing().isEmpty()) return Optional.empty();
            Versions lastVersions = job.lastCompleted().get().versions();
            if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty();
            if (change.application().isPresent() && ! change.application().get().equals(lastVersions.targetApplication())) return Optional.empty();
            if (status.application.deploymentSpec().requireInstance(job.id().application().instance()).upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return Optional.empty();
            if (job.id().type().environment().isTest() && job.isOutOfCapacity()) return Optional.empty();

            // Back off: 10 minutes plus half the failure duration, measured from the last completion.
            Instant firstFailing = job.firstFailing().get().end().get();
            Instant lastCompleted = job.lastCompleted().get().end().get();
            return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted)
                                                      : Optional.of(lastCompleted.plus(Duration.ofMinutes(10))
                                                                                 .plus(Duration.between(firstFailing, lastCompleted)
                                                                                               .dividedBy(2)))
                                                        .filter(status.now::isBefore);
        }

        private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                            DeploymentStatus status, InstanceName instance, JobType jobType) {
            ZoneId zone = ZoneId.from(step.environment(), step.region().get());
            JobStatus job = status.instanceJobs(instance).get(jobType);
            Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(instance)
                                                                                .deployments().get(zone));

            return new JobStepStatus(StepType.deployment, step, dependencies, job, status) {

                @Override
                public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
                    // A production deployment is additionally gated on the change having been tested.
                    return super.readyAt(change, Optional.of(job.id()))
                                .filter(__ -> status.isTested(job.id(), change));
                }

                /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */
                @Override
                public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                    if (   change.isPinned()
                        && change.platform().isPresent()
                        && ! existingDeployment.map(Deployment::version).equals(change.platform()))
                        return Optional.empty();

                    Change fullChange = status.application().require(instance).change();
                    if (existingDeployment.map(deployment ->    ! (change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()))
                                                             &&   (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.applicationVersion())))
                                          .orElse(false))
                        return job.lastCompleted().flatMap(Run::end);

                    return job.lastSuccess()
                              .filter(run ->    change.platform().map(run.versions().targetPlatform()::equals).orElse(true)
                                             && change.application().map(run.versions().targetApplication()::equals).orElse(true))
                              .flatMap(Run::end);
                }
            };
        }

        private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies,
                                                      DeploymentStatus status, InstanceName instance, JobType testType, JobType prodType) {
            JobStatus job = status.instanceJobs(instance).get(testType);
            return new JobStepStatus(StepType.test, step, dependencies, job, status) {
                @Override
                public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                    Versions versions = Versions.from(change, status.application, status.deploymentFor(job.id()), status.systemVersion);
                    // Complete only if the test succeeded after the corresponding production deployment completed.
                    return job.lastSuccess()
                              .filter(run -> versions.targetsMatch(run.versions()))
                              .filter(run -> ! status.jobs()
                                                     .instance(instance)
                                                     .type(prodType)
                                                     .lastCompleted().endedNoLaterThan(run.start())
                                                     .isEmpty())
                              .map(run -> run.end().get());
                }
            };
        }

        private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                      DeploymentStatus status, InstanceName instance,
                                                      JobType jobType, boolean declared) {
            JobStatus job = status.instanceJobs(instance).get(jobType);
            return new JobStepStatus(StepType.test, step, dependencies, job, status) {
                @Override
                public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                    // Latest successful run on versions matching the change, relative to the dependent's deployment.
                    return RunList.from(job)
                                  .matching(run -> run.versions().targetsMatch(Versions.from(change,
                                                                                             status.application,
                                                                                             dependent.flatMap(status::deploymentFor),
                                                                                             status.systemVersion)))
                                  .status(RunStatus.success)
                                  .asList().stream()
                                  .map(run -> run.end().get())
                                  .max(naturalOrder());
                }

                @Override
                public boolean isDeclared() { return declared; }
            };
        }

    }

}
/**
 * Status of the deployment jobs of an application, across all its instances.
 *
 * The deployment spec of the application is modelled as a DAG of {@link StepStatus} nodes,
 * which is used to compute which jobs still need to run — and on which versions — for a
 * given {@link Change} to be considered complete.
 */
class DeploymentStatus {

    /**
     * All jobs the given application may run in the given system: the system and staging test
     * jobs, plus one job per production step, for each instance in the deployment spec.
     */
    public static List<JobId> jobsFor(Application application, SystemName system) {
        if (DeploymentSpec.empty.equals(application.deploymentSpec()))
            return List.of();

        return application.deploymentSpec().instances().stream()
                          .flatMap(spec -> Stream.concat(Stream.of(systemTest, stagingTest),
                                                         flatten(spec).filter(step -> step.concerns(prod))
                                                                      .map(step -> {
                                                                          if (step instanceof DeclaredZone)
                                                                              return JobType.from(system, prod, ((DeclaredZone) step).region().get());
                                                                          return JobType.testFrom(system, ((DeclaredTest) step).region());
                                                                      })
                                                                      .flatMap(Optional::stream))
                                                 .map(type -> new JobId(application.id().instance(spec.name()), type)))
                          .collect(toUnmodifiableList());
    }

    /** Recursively flattens composite steps to a stream of their primitive steps. */
    private static Stream<DeploymentSpec.Step> flatten(DeploymentSpec.Step step) {
        return step instanceof DeploymentSpec.Steps ? step.steps().stream().flatMap(DeploymentStatus::flatten)
                                                    : Stream.of(step);
    }

    /** The distinct union of the two lists, in encounter order. */
    private static <T> List<T> union(List<T> first, List<T> second) {
        return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList());
    }

    private final Application application;
    private final JobList allJobs;
    private final SystemName system;
    private final Version systemVersion;
    private final Instant now;
    private final Map<JobId, StepStatus> jobSteps; // Step status per job, iteration order equal to deployment spec order.
    private final List<StepStatus> allSteps;       // All steps, also those which are not jobs (instances, delays).

    public DeploymentStatus(Application application, Map<JobId, JobStatus> allJobs, SystemName system,
                            Version systemVersion, Instant now) {
        this.application = requireNonNull(application);
        this.allJobs = JobList.from(allJobs.values());
        this.system = requireNonNull(system);
        this.systemVersion = requireNonNull(systemVersion);
        this.now = requireNonNull(now);
        List<StepStatus> allSteps = new ArrayList<>();
        this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps); // Also populates allSteps.
        this.allSteps = List.copyOf(allSteps);
    }

    /** The application this deployment status concerns. */
    public Application application() { return application; }

    /** A filterable list of the status of all jobs for this application. */
    public JobList jobs() { return allJobs; }

    /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */
    public boolean hasFailures() {
        return ! allJobs.failing()
                        .not().withStatus(RunStatus.outOfCapacity)
                        .isEmpty();
    }

    /** All job statuses, by job type, for the given instance. */
    public Map<JobType, JobStatus> instanceJobs(InstanceName instance) {
        return allJobs.asList().stream()
                      .filter(job -> job.id().application().equals(application.id().instance(instance)))
                      .collect(Collectors.toUnmodifiableMap(job -> job.id().type(),
                                                            job -> job));
    }

    /** Filterable job status lists for each instance of this application. */
    public Map<ApplicationId, JobList> instanceJobs() {
        return allJobs.asList().stream()
                      .collect(groupingBy(job -> job.id().application(),
                                          collectingAndThen(toUnmodifiableList(), JobList::from)));
    }

    /**
     * The set of jobs that need to run for the changes of each instance of the application to be considered complete,
     * and any test jobs for any outstanding change, which will likely be needed to later deploy this change.
     */
    public Map<JobId, List<Versions>> jobsToRun() {
        Map<InstanceName, Change> changes = new LinkedHashMap<>();
        for (InstanceName instance : application.deploymentSpec().instanceNames())
            changes.put(instance, application.require(instance).change());
        Map<JobId, List<Versions>> jobs = jobsToRun(changes);

        // Add the non-production jobs implied by any outstanding change, on top of the current change of each instance.
        for (InstanceName instance : application.deploymentSpec().instanceNames())
            changes.put(instance, outstandingChange(instance).onTopOf(application.require(instance).change()));
        var testJobs = jobsToRun(changes, true).entrySet().stream()
                                               .filter(entry -> ! entry.getKey().type().isProduction());

        return Stream.concat(jobs.entrySet().stream(), testJobs)
                     .collect(collectingAndThen(toMap(Map.Entry::getKey,
                                                      Map.Entry::getValue,
                                                      DeploymentStatus::union,
                                                      LinkedHashMap::new),
                                                ImmutableMap::copyOf));
    }

    private Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) {
        Map<JobId, Versions> productionJobs = new LinkedHashMap<>();
        changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests)));
        Map<JobId, List<Versions>> testJobs = testJobs(productionJobs);
        Map<JobId, List<Versions>> jobs = new LinkedHashMap<>(testJobs);
        productionJobs.forEach((job, versions) -> jobs.put(job, List.of(versions)));
        // Add runs for declared, remaining jobs whose changes have not yet completed.
        jobSteps.forEach((job, step) -> {
            if ( ! step.isDeclared() || jobs.containsKey(job))
                return;

            Change change = changes.get(job.application().instance());
            if (change == null || ! change.hasTargets())
                return;

            // Versions are taken from the first production deployment found, if any, so tests verify what would deploy.
            Optional<JobId> firstProductionJobWithDeployment = jobSteps.keySet().stream()
                                                                       .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment())
                                                                       .filter(jobId -> deploymentFor(jobId).isPresent())
                                                                       .findFirst();

            Versions versions = Versions.from(change, application, firstProductionJobWithDeployment.flatMap(this::deploymentFor), systemVersion);
            if (step.completedAt(change, firstProductionJobWithDeployment).isEmpty())
                jobs.merge(job, List.of(versions), DeploymentStatus::union);
        });
        return ImmutableMap.copyOf(jobs);
    }

    /** The set of jobs that need to run for the given changes to be considered complete. */
    public Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes) {
        return jobsToRun(changes, false);
    }

    /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. */
    public Map<JobId, StepStatus> jobSteps() { return jobSteps; }

    /** The status of the instance step of each instance in the deployment spec, by instance name. */
    public Map<InstanceName, StepStatus> instanceSteps() {
        ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder();
        for (StepStatus status : allSteps)
            if (status instanceof InstanceStatus)
                instances.put(status.instance(), status);
        return instances.build();
    }

    /** The current deployment for the given job's instance, in the job's zone, if any. */
    public Optional<Deployment> deploymentFor(JobId job) {
        return Optional.ofNullable(application.require(job.application().instance())
                                              .deployments().get(job.type().zone(system)));
    }

    /**
     * The change of this application's latest submission, if this upgrades any of its production deployments,
     * and has not yet started rolling out, due to some other change or a block window being present at the time of submission.
     */
    public Change outstandingChange(InstanceName instance) {
        return application.latestVersion().map(Change::of)
                          .filter(change -> application.require(instance).change().application().map(change::upgrades).orElse(true))
                          .filter(change -> ! jobsToRun(Map.of(instance, change)).isEmpty())
                          .orElse(Change.empty());
    }

    /**
     * True if the job has already been triggered on the given versions, or if all test types (systemTest, stagingTest),
     * restricted to the job's instance if declared in that instance, have successful runs on the given versions.
     */
    public boolean isTested(JobId job, Change change) {
        Versions versions = Versions.from(change, application, deploymentFor(job), systemVersion);
        return    allJobs.triggeredOn(versions).get(job).isPresent()
               || Stream.of(systemTest, stagingTest)
                        .noneMatch(testType -> declaredTest(job.application(), testType).map(__ -> allJobs.instance(job.application().instance()))
                                                                                        .orElse(allJobs)
                                                                                        .type(testType)
                                                                                        .successOn(versions).isEmpty());
    }

    /**
     * The production jobs of the given instance which are not yet complete for the given change.
     * When assumeUpgradesSucceed, existing deployments are presented as already being on the
     * change's platform, so versions are computed as if any platform upgrade has completed.
     */
    private Map<JobId, Versions> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) {
        ImmutableMap.Builder<JobId, Versions> jobs = ImmutableMap.builder();
        jobSteps.forEach((job, step) -> {
            Optional<Deployment> deployment = deploymentFor(job)
                    .map(existing -> assumeUpgradesSucceed ? new Deployment(existing.zone(),
                                                                            existing.applicationVersion(),
                                                                            change.platform().orElse(existing.version()),
                                                                            existing.at(),
                                                                            existing.metrics(),
                                                                            existing.activity(),
                                                                            existing.quota())
                                                           : existing);
            if (   job.application().instance().equals(instance)
                && job.type().isProduction()
                && step.completedAt(change).isEmpty())
                jobs.put(job, Versions.from(change, application, deployment, systemVersion));
        });
        return jobs.build();
    }

    /** The production jobs that need to run to complete roll-out of the given change to production. */
    public Map<JobId, Versions> productionJobs(InstanceName instance, Change change) {
        return productionJobs(instance, change, false);
    }

    /** The test jobs that need to run prior to the given production deployment jobs. */
    public Map<JobId, List<Versions>> testJobs(Map<JobId, Versions> jobs) {
        Map<JobId, List<Versions>> testJobs = new LinkedHashMap<>();
        for (JobType testType : List.of(systemTest, stagingTest)) {
            // First add any declared tests of each production deployment's instance, for its versions.
            jobs.forEach((job, versions) -> {
                if (job.type().isProduction() && job.type().isDeployment()) {
                    declaredTest(job.application(), testType).ifPresent(testJob -> {
                        if (allJobs.successOn(versions).get(testJob).isEmpty())
                            testJobs.merge(testJob, List.of(versions), DeploymentStatus::union);
                    });
                }
            });
            // Then add a test for any production versions not already covered by a test added above.
            jobs.forEach((job, versions) -> {
                if (   job.type().isProduction() && job.type().isDeployment()
                    && allJobs.successOn(versions).type(testType).isEmpty()
                    && testJobs.keySet().stream()
                               .noneMatch(test ->    test.type() == testType
                                                  && testJobs.get(test).contains(versions)))
                    testJobs.merge(firstDeclaredOrElseImplicitTest(testType), List.of(versions), DeploymentStatus::union);
            });
        }
        return ImmutableMap.copyOf(testJobs);
    }

    /** JobId of the first declared test of the given type, or, if none are declared, the first implicit one. */
    private JobId firstDeclaredOrElseImplicitTest(JobType testJob) {
        return application.deploymentSpec().instanceNames().stream()
                          .map(name -> new JobId(application.id().instance(name), testJob))
                          .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow();
    }

    /** JobId of any declared test of the given type, for the given instance. */
    private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) {
        JobId jobId = new JobId(instanceId, testJob);
        return jobSteps.get(jobId).isDeclared() ? Optional.of(jobId) : Optional.empty();
    }

    /** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. */
    private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps) {
        if (DeploymentSpec.empty.equals(spec))
            return Map.of();

        Map<JobId, StepStatus> dependencies = new LinkedHashMap<>();
        List<StepStatus> previous = List.of();
        for (DeploymentSpec.Step step : spec.steps())
            previous = fillStep(dependencies, allSteps, step, previous, null);

        return ImmutableMap.copyOf(dependencies);
    }

    /** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */
    private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps,
                                      DeploymentSpec.Step step, List<StepStatus> previous, InstanceName instance) {
        // Leaf steps: a delay, or a job of some kind.
        if (step.steps().isEmpty()) {
            if (instance == null)
                return previous; // Steps outside any instance are not filled in here.

            if ( ! step.delay().isZero()) {
                StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance);
                allSteps.add(stepStatus);
                return List.of(stepStatus);
            }

            JobType jobType;
            StepStatus stepStatus;
            if (step.concerns(test) || step.concerns(staging)) {
                jobType = JobType.from(system, ((DeclaredZone) step).environment(), null)
                                 .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, instance, jobType, true);
                // Note: appended to, rather than replacing, the previous steps.
                previous = new ArrayList<>(previous);
                previous.add(stepStatus);
            }
            else if (step.isTest()) {
                jobType = JobType.testFrom(system, ((DeclaredTest) step).region())
                                 .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                JobType preType = JobType.from(system, prod, ((DeclaredTest) step).region())
                                         .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, instance, jobType, preType);
                previous = List.of(stepStatus);
            }
            else if (step.concerns(prod)) {
                jobType = JobType.from(system, ((DeclaredZone) step).environment(), ((DeclaredZone) step).region().get())
                                 .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, instance, jobType);
                previous = List.of(stepStatus);
            }
            else return previous; // No job for this step; skip it.

            JobId jobId = new JobId(application.id().instance(instance), jobType);
            allSteps.removeIf(existing -> existing.job().equals(Optional.of(jobId))); // A declared step replaces an implicit one for the same job.
            allSteps.add(stepStatus);
            dependencies.put(jobId, stepStatus);
            return previous;
        }

        if (step instanceof DeploymentInstanceSpec) {
            DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step);
            StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this);
            instance = spec.name();
            allSteps.add(instanceStatus);
            previous = List.of(instanceStatus);
            // Ensure every instance has system and staging test steps, implicitly if not declared.
            for (JobType test : List.of(systemTest, stagingTest)) {
                JobId job = new JobId(application.id().instance(instance), test);
                if ( ! dependencies.containsKey(job)) {
                    var testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test.environment()), List.of(),
                                                                    this, job.application().instance(), test, false);
                    dependencies.put(job, testStatus);
                    allSteps.add(testStatus);
                }
            }
        }

        if (step.isOrdered()) {
            // Sequential: each nested step depends on the steps before it.
            for (DeploymentSpec.Step nested : step.steps())
                previous = fillStep(dependencies, allSteps, nested, previous, instance);

            return previous;
        }

        // Parallel: all nested steps share the same predecessors.
        List<StepStatus> parallel = new ArrayList<>();
        for (DeploymentSpec.Step nested : step.steps())
            parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance));

        return List.copyOf(parallel);
    }

    public enum StepType {

        /** An instance — completion marks a change as ready for the jobs contained in it. */
        instance,

        /** A timed delay. */
        delay,

        /** A system, staging or production test. */
        test,

        /** A production deployment. */
        deployment,

    }

    /**
     * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change.
     *
     * Each node contains a step describing the node,
     * a list of steps which need to be complete before the step may start,
     * a list of jobs from which completion of the step is computed, and
     * optionally, an instance name used to identify a job type for the step,
     *
     * The completion criterion for each type of step is implemented in subclasses of this.
     */
    public static abstract class StepStatus {

        private final StepType type;
        private final DeploymentSpec.Step step;
        private final List<StepStatus> dependencies;
        private final InstanceName instance;

        private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) {
            this.type = requireNonNull(type);
            this.step = requireNonNull(step);
            this.dependencies = List.copyOf(dependencies);
            this.instance = instance; // Not required non-null, unlike the other fields.
        }

        /** The type of step this is. */
        public final StepType type() { return type; }

        /** The step defining this. */
        public final DeploymentSpec.Step step() { return step; }

        /** The list of steps that need to be complete before this may start. */
        public final List<StepStatus> dependencies() { return dependencies; }

        /** The instance of this. */
        public final InstanceName instance() { return instance; }

        /** The id of the job this corresponds to, if any. */
        public Optional<JobId> job() { return Optional.empty(); }

        /** The time at which this is, or was, complete on the given change and / or versions. */
        public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); }

        /** The time at which this is, or was, complete on the given change and / or versions. */
        abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent);

        /** The time at which this step is ready to run the specified change and / or versions. */
        public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); }

        /** The time at which this step is ready to run the specified change and / or versions. */
        Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
            return dependenciesCompletedAt(change, dependent)
                    .map(ready -> Stream.of(blockedUntil(change),
                                            pausedUntil(),
                                            coolingDownUntil(change))
                                        .flatMap(Optional::stream)
                                        .reduce(ready, maxBy(naturalOrder())));
        }

        /** The time at which all dependencies completed on the given change and / or versions. */
        Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) {
            return dependencies.stream().allMatch(step -> step.completedAt(change, dependent).isPresent())
                   ? dependencies.stream().map(step -> step.completedAt(change, dependent).get())
                                 .max(naturalOrder())
                                 .or(() -> Optional.of(Instant.EPOCH)) // No dependencies means complete since the epoch.
                   : Optional.empty();
        }

        /** The time until which this step is blocked by a change blocker. */
        public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); }

        /** The time until which this step is paused by user intervention. */
        public Optional<Instant> pausedUntil() { return Optional.empty(); }

        /** The time until which this step is cooling down, due to consecutive failures. */
        public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); }

        /** Whether this step is declared in the deployment spec, or is an implicit step. */
        public boolean isDeclared() { return true; }

    }

    private static class DelayStatus extends StepStatus {

        private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) {
            super(StepType.delay, step, dependencies, instance);
        }

        /** Complete once the configured delay has passed since this step became ready. */
        @Override
        public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
            return readyAt(change, dependent).map(completion -> completion.plus(step().delay()));
        }

    }

    private static class InstanceStatus extends StepStatus {

        private final DeploymentInstanceSpec spec;
        private final Instant now;
        private final Instance instance;
        private final DeploymentStatus status;

        private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now,
                               Instance instance, DeploymentStatus status) {
            super(StepType.instance, spec, dependencies, spec.name());
            this.spec = spec;
            this.now = now;
            this.instance = instance;
            this.status = status;
        }

        /**
         * Time of completion of its dependencies, if all parts of the given change are contained in the change
         * for this instance, or if no more jobs should run for this instance for the given change.
         */
        @Override
        public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
            return    (   (change.platform().isEmpty() || change.platform().equals(instance.change().platform()))
                       && (change.application().isEmpty() || change.application().equals(instance.change().application()))
                   || status.jobsToRun(Map.of(instance.name(), change)).isEmpty())
                   ? dependenciesCompletedAt(change, dependent)
                   : Optional.empty();
        }

        /** The first hour boundary, within the next week, at which no change blocker blocks the change, if currently blocked. */
        @Override
        public Optional<Instant> blockedUntil(Change change) {
            for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) {
                boolean blocked = false;
                for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) {
                    // Advance in whole hours past any window which blocks the relevant parts of the change.
                    while (   blocker.window().includes(current)
                           && now.plus(Duration.ofDays(7)).isAfter(current)
                           && (   change.platform().isPresent() && blocker.blocksVersions()
                               || change.application().isPresent() && blocker.blocksRevisions())) {
                        blocked = true;
                        current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS);
                    }
                }
                if ( ! blocked)
                    return current == now ? Optional.empty() : Optional.of(current);
            }
            return Optional.of(now.plusSeconds(1 << 30)); // Effectively "forever", when no opening is found within a week.
        }

    }

    private static abstract class JobStepStatus extends StepStatus {

        private final JobStatus job;
        private final DeploymentStatus status;

        private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job,
                              DeploymentStatus status) {
            super(type, step, dependencies, job.id().application().instance());
            this.job = requireNonNull(job);
            this.status = requireNonNull(status);
        }

        @Override
        public Optional<JobId> job() { return Optional.of(job.id()); }

        @Override
        public Optional<Instant> pausedUntil() {
            return status.application().require(job.id().application().instance()).jobPause(job.id().type());
        }

        /** Cooling down while failing on the change's versions: ten minutes plus half the failing period after the last completion, until passed. */
        @Override
        public Optional<Instant> coolingDownUntil(Change change) {
            if (job.lastTriggered().isEmpty()) return Optional.empty();
            if (job.lastCompleted().isEmpty()) return Optional.empty();
            if (job.firstFailing().isEmpty()) return Optional.empty();
            Versions lastVersions = job.lastCompleted().get().versions();
            if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty();
            if (change.application().isPresent() && ! change.application().get().equals(lastVersions.targetApplication())) return Optional.empty();
            // Canaries, and out-of-capacity test jobs, retry without back-off.
            if (status.application.deploymentSpec().requireInstance(job.id().application().instance()).upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return Optional.empty();
            if (job.id().type().environment().isTest() && job.isOutOfCapacity()) return Optional.empty();

            Instant firstFailing = job.firstFailing().get().end().get();
            Instant lastCompleted = job.lastCompleted().get().end().get();
            return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted)
                                                      : Optional.of(lastCompleted.plus(Duration.ofMinutes(10))
                                                                                 .plus(Duration.between(firstFailing, lastCompleted)
                                                                                               .dividedBy(2)))
                                                                .filter(status.now::isBefore);
        }

        private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                            DeploymentStatus status, InstanceName instance, JobType jobType) {
            ZoneId zone = ZoneId.from(step.environment(), step.region().get());
            JobStatus job = status.instanceJobs(instance).get(jobType);
            Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(instance)
                                                                                .deployments().get(zone));

            return new JobStepStatus(StepType.deployment, step, dependencies, job, status) {
                /** Production deployments are additionally gated on successful tests of the change. */
                @Override
                public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
                    return super.readyAt(change, Optional.of(job.id()))
                                .filter(__ -> status.isTested(job.id(), change));
                }

                /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */
                @Override
                public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                    if (     change.isPinned()
                        &&   change.platform().isPresent()
                        && ! existingDeployment.map(Deployment::version).equals(change.platform()))
                        return Optional.empty();

                    Change fullChange = status.application().require(instance).change();
                    if (existingDeployment.map(deployment ->    ! (change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()))
                                                             &&   (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.applicationVersion())))
                                          .orElse(false))
                        return job.lastCompleted().flatMap(Run::end);

                    return job.lastSuccess()
                              .filter(run ->    change.platform().map(run.versions().targetPlatform()::equals).orElse(true)
                                             && change.application().map(run.versions().targetApplication()::equals).orElse(true))
                              .flatMap(Run::end);
                }
            };
        }

        private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies,
                                                      DeploymentStatus status, InstanceName instance,
                                                      JobType testType, JobType prodType) {
            JobStatus job = status.instanceJobs(instance).get(testType);
            return new JobStepStatus(StepType.test, step, dependencies, job, status) {
                /** Complete at the last success on matching versions, but only if the corresponding production deployment completed before that run started. */
                @Override
                public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                    Versions versions = Versions.from(change, status.application, status.deploymentFor(job.id()), status.systemVersion);
                    return job.lastSuccess()
                              .filter(run -> versions.targetsMatch(run.versions()))
                              .filter(run -> ! status.jobs()
                                                     .instance(instance)
                                                     .type(prodType)
                                                     .lastCompleted().endedNoLaterThan(run.start())
                                                     .isEmpty())
                              .map(run -> run.end().get());
                }
            };
        }

        private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                      DeploymentStatus status, InstanceName instance,
                                                      JobType jobType, boolean declared) {
            JobStatus job = status.instanceJobs(instance).get(jobType);
            return new JobStepStatus(StepType.test, step, dependencies, job, status) {
                /** Complete at the latest successful run on the versions to verify, if any. */
                @Override
                public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                    return RunList.from(job)
                                  .matching(run -> run.versions().targetsMatch(Versions.from(change,
                                                                                             status.application,
                                                                                             dependent.flatMap(status::deploymentFor),
                                                                                             status.systemVersion)))
                                  .status(RunStatus.success)
                                  .asList().stream()
                                  .map(run -> run.end().get())
                                  .max(naturalOrder());
                }

                @Override
                public boolean isDeclared() { return declared; }
            };
        }

    }

}
Fixed
/**
 * Loads all remote sessions found in ZooKeeper, in parallel on the given executor,
 * and waits for all of them, logging the outcome per session.
 *
 * Fixes two defects in the previous version: the caught exception was swallowed
 * (neither the cause nor the failing session id was logged), and the session id was
 * only recoverable from {@code Future.get()} on success, so failures could not be
 * attributed to a session.
 *
 * @param executor the executor the load tasks are submitted to
 */
private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
    // Key each future by its session id, so failures can be attributed to a session.
    Map<Long, Future<?>> futures = new HashMap<>();
    for (long sessionId : getRemoteSessionsFromZooKeeper()) {
        futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));
    }
    futures.forEach((sessionId, future) -> {
        try {
            future.get();
            log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded");
        } catch (ExecutionException | InterruptedException e) {
            // Include the failing session id and the cause; previously both were lost.
            log.log(Level.WARNING, "Could not load session " + sessionId, e);
        }
    });
}
log.log(Level.WARNING, "Could not load session");
/**
 * Loads every remote session found in ZooKeeper, submitting one task per session to
 * the given executor, then waits for each task and logs the result per session id.
 *
 * @param executor the executor the load tasks run on
 */
private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
    // Remember which session id each future belongs to, so outcomes can be attributed.
    Map<Long, Future<?>> pending = new HashMap<>();
    for (long sessionId : getRemoteSessionsFromZooKeeper())
        pending.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));

    for (Map.Entry<Long, Future<?>> entry : pending.entrySet()) {
        Long sessionId = entry.getKey();
        try {
            entry.getValue().get();
            log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded");
        } catch (ExecutionException | InterruptedException e) {
            log.log(Level.WARNING, "Could not load session " + sessionId, e);
        }
    }
}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final ConfigCurator configCurator; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final TenantListener tenantListener; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, PermanentApplicationPackage permanentApplicationPackage, FlagSource flagSource, 
ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, TenantListener tenantListener) { this.tenantName = tenantName; this.configCurator = ConfigCurator.create(curator); sessionCounter = new SessionCounter(configCurator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.tenantListener = tenantListener; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("load-sessions-")); loadLocalSessions(executor); loadRemoteSessions(executor); try { executor.shutdown(); if ( ! 
executor.awaitTermination(1, TimeUnit.MINUTES)) log.log(Level.INFO, "Executor did not terminate"); } catch (InterruptedException e) { e.printStackTrace(); } } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } public Collection<LocalSession> getLocalSessions() { return localSessionCache.values(); } private void loadLocalSessions(ExecutorService executor) { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; List<Future<Long>> futures = new ArrayList<>(); for (File session : sessions) { long sessionId = Long.parseLong(session.getName()); futures.add(executor.submit(() -> createSessionFromId(sessionId))); } futures.forEach(f -> { try { long sessionId = f.get(); log.log(Level.INFO, () -> "Local session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session"); } }); } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); 
setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { 
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
* * @param sessionId session id for the new session */ public long sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return sessionId; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); return sessionId; } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void delete(Session remoteSession) { long sessionId = remoteSession.getSessionId(); log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId); createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit(); deleteRemoteSessionFromZooKeeper(remoteSession); remoteSessionCache.remove(sessionId); LocalSession localSession = getLocalSession(sessionId); if (localSession != null) { log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId); deleteLocalSession(localSession); } } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) 
{ if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); Set<LocalSession> toDelete = new HashSet<>(); try { for (LocalSession candidate : localSessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { toDelete.add(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { toDelete.add(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } toDelete.forEach(this::deleteLocalSession); } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } private void 
ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (configCurator.exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { 
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
*/ long createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); return sessionId; } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. * Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); 
createLocalSession(sessionDir, applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = configserverConfig.serverId(); return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { 
deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for 
file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
/**
 * Manages config deployment sessions for a single tenant: local sessions backed by
 * application directories on the file system, and remote sessions backed by ZooKeeper
 * state shared between config servers. A directory cache on the tenant's session path
 * keeps the remote session cache in sync with ZooKeeper, and remote sessions are
 * mirrored into local sessions from the distributed application package when needed.
 *
 * NOTE(review): thread-safety appears to rely on the synchronized maps plus the single
 * {@code monitor} lock taken while creating application packages — confirm before
 * adding new mutable state.
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session application directories are named by their numeric session id
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel passed to DeployData when the application has no currently active session
    private static final long nonExistingActiveSessionId = 0;

    // Guards application package creation (directory copy + meta data write)
    private final Object monitor = new Object();
    private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final PermanentApplicationPackage permanentApplicationPackage;
    private final FlagSource flagSource;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final Metrics metrics;
    private final MetricUpdater metricUpdater;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final ConfigCurator configCurator;
    private final SessionCounter sessionCounter;
    private final SecretStore secretStore;
    private final HostProvisionerProvider hostProvisionerProvider;
    private final ConfigserverConfig configserverConfig;
    private final ConfigServerDB configServerDB;
    private final Zone zone;
    private final ModelFactoryRegistry modelFactoryRegistry;
    private final ConfigDefinitionRepo configDefinitionRepo;
    private final TenantListener tenantListener;

    public SessionRepository(TenantName tenantName,
                             TenantApplications applicationRepo,
                             SessionPreparer sessionPreparer,
                             Curator curator,
                             Metrics metrics,
                             StripedExecutor<TenantName> zkWatcherExecutor,
                             PermanentApplicationPackage permanentApplicationPackage,
                             FlagSource flagSource,
                             ExecutorService zkCacheExecutor,
                             SecretStore secretStore,
                             HostProvisionerProvider hostProvisionerProvider,
                             ConfigserverConfig configserverConfig,
                             ConfigServerDB configServerDB,
                             Zone zone,
                             Clock clock,
                             ModelFactoryRegistry modelFactoryRegistry,
                             ConfigDefinitionRepo configDefinitionRepo,
                             TenantListener tenantListener) {
        this.tenantName = tenantName;
        this.configCurator = ConfigCurator.create(curator);
        sessionCounter = new SessionCounter(configCurator, tenantName);
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = clock;
        this.curator = curator;
        this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
        // All ZK watcher callbacks for this tenant are serialized onto the tenant's stripe
        this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
        this.permanentApplicationPackage = permanentApplicationPackage;
        this.flagSource = flagSource;
        this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.metrics = metrics;
        this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.secretStore = secretStore;
        this.hostProvisionerProvider = hostProvisionerProvider;
        this.configserverConfig = configserverConfig;
        this.configServerDB = configServerDB;
        this.zone = zone;
        this.modelFactoryRegistry = modelFactoryRegistry;
        this.configDefinitionRepo = configDefinitionRepo;
        this.tenantListener = tenantListener;

        // Load existing sessions before starting to watch for changes, so the caches
        // are populated before any child events arrive.
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    /** Loads local and remote sessions in parallel at construction time, bounded by one minute. */
    private void loadSessions() {
        ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                                new DaemonThreadFactory("load-sessions-"));
        loadLocalSessions(executor);
        loadRemoteSessions(executor);
        try {
            executor.shutdown();
            if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
                log.log(Level.INFO, "Executor did not terminate");
        } catch (InterruptedException e) {
            log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        }
    }

    /** Caches the given local session, and ensures a matching remote session exists in the cache. */
    public void addLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        localSessionCache.put(sessionId, session);
        remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId));
    }

    public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); }

    public Collection<LocalSession> getLocalSessions() { return localSessionCache.values(); }

    /** Loads every session application directory found on disk; failures are logged, not propagated. */
    private void loadLocalSessions(ExecutorService executor) {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return;

        Map<Long, Future<?>> futures = new HashMap<>();
        for (File session : sessions) {
            long sessionId = Long.parseLong(session.getName());
            futures.put(sessionId, executor.submit(() -> createSessionFromId(sessionId)));
        }
        futures.forEach((sessionId, future) -> {
            try {
                future.get();
                log.log(Level.INFO, () -> "Local session " + sessionId + " loaded");
            } catch (ExecutionException | InterruptedException e) {
                log.log(Level.WARNING, "Could not load session " + sessionId, e);
            }
        });
    }

    /**
     * Prepares a local session: creates the application in the repo, runs the session
     * preparer, marks the session PREPARE and waits for other servers via the prepare waiter.
     *
     * @return the config change actions resulting from preparing
     */
    public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
        applicationRepo.createApplication(params.getApplicationId()); // TODO: Instantiate after creating applicationRepo?
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, now, getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget);
        // Copy the deployment metadata of the base session onto the new one
        session.setApplicationId(existingApplicationId);
        session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget);
    }

    /**
     * Creates a local session based on a remote session and the distributed application package.
     * Does not wait for session being created on other servers.
     */
    private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) {
        try {
            ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false);
            createLocalSession(sessionId, applicationPackage);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /** Removes the session from cache and watchers, and deletes its application directory on disk. */
    public void deleteLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Deleting local session " + sessionId);
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        localSessionCache.remove(sessionId);
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    private void deleteAllSessions() {
        // Copy first: deleteLocalSession mutates localSessionCache while we iterate
        List<LocalSession> sessions = new ArrayList<>(localSessionCache.values());
        for (LocalSession session : sessions) {
            deleteLocalSession(session);
        }
    }

    public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); }

    /** Returns the session ids of all sessions currently present in ZooKeeper. */
    public List<Long> getRemoteSessionsFromZooKeeper() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    /** Creates, caches and watches a remote session for the given id, loading its application if it is active. */
    public RemoteSession createRemoteSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        remoteSessionCache.put(sessionId, session);
        loadSessionIfActive(session);
        updateSessionStateWatcher(sessionId, session);
        return session;
    }

    /**
     * Deletes expired, non-active remote sessions from ZooKeeper.
     *
     * @return the number of sessions deleted
     */
    public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (long sessionId : getRemoteSessionsFromZooKeeper()) {
            Session session = remoteSessionCache.get(sessionId);
            if (session == null) continue;
            if (session.getStatus() == Session.Status.ACTIVATE) continue;
            if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
                log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
                deleteRemoteSessionFromZooKeeper(session);
                deleted++;
            }
        }
        return deleted;
    }

    /** Replaces the cached remote session with its deactivated variant. */
    public void deactivateAndUpdateCache(RemoteSession remoteSession) {
        RemoteSession session = remoteSession.deactivated();
        remoteSessionCache.put(session.getSessionId(), session);
    }

    public void deleteRemoteSessionFromZooKeeper(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
        return (created.plus(expiryTime).isBefore(clock.instant()));
    }

    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public void sessionAdded(long sessionId) {
        if (hasStatusDeleted(sessionId)) return;

        log.log(Level.FINE, () -> "Adding remote session " + sessionId);
        Session session = createRemoteSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        createLocalSessionFromDistributedApplicationPackage(sessionId);
    }

    private boolean hasStatusDeleted(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        return session.getStatus() == Session.Status.DELETE;
    }

    /** Activates the application of the given session and notifies the active waiter. */
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
        applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    /** Marks the session DELETE, removes it from ZooKeeper and both caches, and deletes local state. */
    public void delete(Session remoteSession) {
        long sessionId = remoteSession.getSessionId();
        log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId);
        createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit();
        deleteRemoteSessionFromZooKeeper(remoteSession);
        remoteSessionCache.remove(sessionId);
        LocalSession localSession = getLocalSession(sessionId);
        if (localSession != null) {
            log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId);
            deleteLocalSession(localSession);
        }
    }

    /** If this session is the active one for some application, load and activate its application. */
    private void loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId());
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")");
                return;
            }
        }
    }

    /** Loads the application of the given session and notifies the prepare waiter. */
    void prepareRemoteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter, session);
    }

    /** Returns the session's application set, loading (and caching the activated session) if necessary. */
    public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
        if (session.applicationSet().isPresent()) {
            return session.applicationSet().get();
        }
        // Reuse the previously active application set only if this session is newer than it
        Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId());
        Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan)
                                                                         .flatMap(this::getApplicationSet);
        ApplicationSet applicationSet = loadApplication(session, previousApplicationSet);
        RemoteSession activated = session.activated(applicationSet);
        long sessionId = activated.getSessionId();
        remoteSessionCache.put(sessionId, activated);
        updateSessionStateWatcher(sessionId, activated);
        return applicationSet;
    }

    void confirmUpload(Session session) {
        Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter, session);
        log.log(Level.FINE, "Done notifying upload for session " + sessionId);
    }

    /**
     * Notifies the given completion waiter. NoNode/NodeExists failures are expected when the
     * session node was deleted or already created concurrently, and are only logged.
     */
    void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            // Throw only if we get something else than the "expected" exceptions below.
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                              KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() +
                                    " (" + completionWaiter + ")," +
                                    " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                                                ? "has been deleted"
                                                : "already exists"));
            else
                throw e;
        }
    }

    /** Builds the application models for the session from the application package stored in ZooKeeper. */
    private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
        log.log(Level.FINE, () -> "Loading application for " + session);
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                    session.getSessionId(),
                                                                    sessionZooKeeperClient,
                                                                    previousApplicationSet,
                                                                    curator,
                                                                    metrics,
                                                                    permanentApplicationPackage,
                                                                    flagSource,
                                                                    secretStore,
                                                                    hostProvisionerProvider,
                                                                    configserverConfig,
                                                                    zone,
                                                                    modelFactoryRegistry,
                                                                    configDefinitionRepo,
                                                                    tenantListener);
        SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts());
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage,
                                                           allocatedHosts,
                                                           clock.instant()));
    }

    /** Recomputes per-status session count metrics; runs on the tenant's ZK watcher stripe. */
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            for (Session session : remoteSessionCache.values()) {
                sessionMetrics.add(session.getStatus());
            }
            metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    /**
     * Deletes local sessions that have expired, and sessions older than one day that are
     * not the active session of their application. Any error is logged and swallowed so
     * purging never fails the caller.
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        Set<LocalSession> toDelete = new HashSet<>();
        try {
            for (LocalSession candidate : localSessionCache.values()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);

                // Sessions with state other than ACTIVATE
                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    toDelete.add(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    // Sessions with state ACTIVATE, but which are not actually active
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        toDelete.add(candidate);
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }
            toDelete.forEach(this::deleteLocalSession);
        } catch (Throwable e) {
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    private boolean hasExpired(LocalSession candidate) {
        return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant());
    }

    private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; }

    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (configCurator.exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    /** Creates a FilesApplicationPackage with deploy data describing this deployment. */
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    /** Allocates a new session id, creates the session in ZooKeeper and waits for upload completion. */
    private LocalSession createSessionFromApplication(File applicationFile,
                                                      ApplicationId applicationId,
                                                      boolean internalRedeploy,
                                                      TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy);
            log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            sessionZKClient.createNewSession(clock.instant());
            Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
            LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
            waiter.awaitCompletion(timeoutBudget.timeLeft());
            addLocalSession(session);
            return session;
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    private ApplicationPackage createApplicationPackage(File applicationFile,
                                                        ApplicationId applicationId,
                                                        long sessionId,
                                                        boolean internalRedeploy) throws IOException {
        // Serialize copy + meta data write so concurrent deployments do not interleave
        synchronized (monitor) {
            Optional<Long> activeSessionId = getActiveSessionId(applicationId);
            File userApplicationDir = getSessionAppDir(sessionId);
            copyApp(applicationFile, userApplicationDir);
            ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                      userApplicationDir,
                                                                      applicationId,
                                                                      sessionId,
                                                                      activeSessionId,
                                                                      internalRedeploy);
            applicationPackage.writeMetaData();
            return applicationPackage;
        }
    }

    public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
        return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet);
    }

    private Optional<ApplicationSet> getApplicationSet(long sessionId) {
        Optional<ApplicationSet> applicationSet = Optional.empty();
        try {
            RemoteSession session = getRemoteSession(sessionId);
            applicationSet = Optional.ofNullable(ensureApplicationLoaded(session));
        } catch (IllegalArgumentException e) {
            // Do nothing if we have no currently active session
        }
        return applicationSet;
    }

    /** Copies the application into place via a temp dir and an atomic move; no-op if the destination exists. */
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists()) {
            log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied");
            return;
        }
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");

        // Copy app atomically: Copy to a temp dir and move to destination
        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // In case some operation above fails the temp dir is cleaned up here
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Creates a new local session for the given session id from the application
     * package already present in the session's directory on disk.
     */
    void createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        createLocalSession(sessionId, applicationPackage);
    }

    void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        addLocalSession(session);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary
     */
    public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            createSessionFromId(sessionId);
            return;
        }

        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // We cannot be guaranteed that the file reference exists (it could be that it has not
                // been downloaded yet), and e.g. when bootstrapping we cannot throw an exception in that case
                log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            createLocalSession(sessionDir, applicationId, sessionId);
        }
    }

    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() { return sessionCounter.nextSessionId(); }

    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = configserverConfig.serverId();
        return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId);
    }

    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId);
    }

    /** Creates a state watcher for the session if none exists, otherwise points the existing one at the session. */
    private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) {
        SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId);
        if (sessionStateWatcher == null) {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this));
        } else {
            sessionStateWatcher.updateRemoteSession(remoteSession);
        }
    }

    @Override
    public String toString() { return getLocalSessions().toString(); }

    public Clock clock() { return clock; }

    /** Deletes all local state and closes the ZooKeeper directory cache and session watchers. */
    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    private void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    /** Removes cached remote sessions (and closes their watchers) not present in the given list. */
    private void checkForRemovedSessions(List<Long> existingSessions) {
        for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
            long sessionId = it.next().sessionId;
            if (existingSessions.contains(sessionId)) continue;

            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            it.remove();
            metricUpdater.incRemovedSessions();
        }
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    /** Creates a transaction setting ACTIVATE status and recording the session as the application's active one. */
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    public Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); }

    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }
    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }
    }

    private interface FileOperation extends Transaction.Operation {
        void commit();
    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            // TODO: Check delete access in prepare()
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }
    }

}
When will an implicit test job (rather than a declared one) be present here?
/**
 * Returns the status of all declared steps of this application, plus the first system
 * test and staging test job steps, which are included even when not declared.
 *
 * NOTE(review): an implicit test job is presumably present when no instance declares a
 * system/staging test, since {@code firstDeclaredOrElseImplicitTest} appears to fall
 * back to an implicit job in that case — confirm against its implementation.
 */
public List<StepStatus> allSteps() {
    List<JobId> firstTestJobs = List.of(firstDeclaredOrElseImplicitTest(systemTest),
                                        firstDeclaredOrElseImplicitTest(stagingTest));
    return allSteps.stream()
                   // job() is only dereferenced for undeclared steps; assumes those always carry a job
                   // (orElseThrow would fail otherwise) — TODO confirm
                   .filter(step -> step.isDeclared() || firstTestJobs.contains(step.job().orElseThrow()))
                   .collect(toUnmodifiableList());
}
List<JobId> firstTestJobs = List.of(firstDeclaredOrElseImplicitTest(systemTest),
/**
 * Returns the status of all steps relevant to this application: every declared step,
 * plus the first system test and staging test job steps even when they are not declared.
 */
public List<StepStatus> allSteps() {
    JobId firstSystemTest = firstDeclaredOrElseImplicitTest(systemTest);
    JobId firstStagingTest = firstDeclaredOrElseImplicitTest(stagingTest);
    List<StepStatus> relevant = new ArrayList<>();
    for (StepStatus step : allSteps) {
        boolean include = step.isDeclared();
        if ( ! include) {
            // Only undeclared steps have their job inspected, mirroring the short-circuit above
            JobId job = step.job().orElseThrow();
            include = job.equals(firstSystemTest) || job.equals(firstStagingTest);
        }
        if (include)
            relevant.add(step);
    }
    return List.copyOf(relevant);
}
/**
 * Status of the deployment jobs of an {@link Application}: a DAG of primitive steps derived from
 * its deployment spec, with per-step completion/readiness computed from the job history.
 */
class DeploymentStatus {

    /** All jobs the given application may run in the given system: system and staging tests, plus all declared production jobs. */
    public static List<JobId> jobsFor(Application application, SystemName system) {
        if (DeploymentSpec.empty.equals(application.deploymentSpec()))
            return List.of();

        return application.deploymentSpec().instances().stream()
                          .flatMap(spec -> Stream.concat(Stream.of(systemTest, stagingTest),
                                                         flatten(spec).filter(step -> step.concerns(prod))
                                                                      .map(step -> {
                                                                          if (step instanceof DeclaredZone)
                                                                              return JobType.from(system, prod, ((DeclaredZone) step).region().get());
                                                                          return JobType.testFrom(system, ((DeclaredTest) step).region());
                                                                      })
                                                                      .flatMap(Optional::stream))
                                                 .map(type -> new JobId(application.id().instance(spec.name()), type)))
                          .collect(toUnmodifiableList());
    }

    /** Flattens a (possibly nested) composite step into its primitive steps, in order. */
    private static Stream<DeploymentSpec.Step> flatten(DeploymentSpec.Step step) {
        return step instanceof DeploymentSpec.Steps ? step.steps().stream().flatMap(DeploymentStatus::flatten) : Stream.of(step);
    }

    /** Order-preserving union of the two lists, with duplicates removed. */
    private static <T> List<T> union(List<T> first, List<T> second) {
        return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList());
    }

    private final Application application;
    private final JobList allJobs;
    private final SystemName system;
    private final Version systemVersion;
    private final Instant now;
    private final Map<JobId, StepStatus> jobSteps;   // job steps only, keyed by job id, in declaration order
    private final List<StepStatus> allSteps;         // every step, including instances and delays

    public DeploymentStatus(Application application, Map<JobId, JobStatus> allJobs, SystemName system,
                            Version systemVersion, Instant now) {
        this.application = requireNonNull(application);
        this.allJobs = JobList.from(allJobs.values());
        this.system = requireNonNull(system);
        this.systemVersion = requireNonNull(systemVersion);
        this.now = requireNonNull(now);
        List<StepStatus> allSteps = new ArrayList<>();
        // jobDependencies fills the local allSteps list as a side effect while building the DAG.
        this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps);
        this.allSteps = List.copyOf(allSteps);
    }

    /** The application this deployment status concerns. */
    public Application application() { return application; }

    /** A filterable list of the status of all jobs for this application. */
    public JobList jobs() { return allJobs; }

    /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */
    public boolean hasFailures() {
        return ! allJobs.failing()
                        .not().withStatus(RunStatus.outOfCapacity)
                        .isEmpty();
    }

    /** All job statuses, by job type, for the given instance. */
    public Map<JobType, JobStatus> instanceJobs(InstanceName instance) {
        return allJobs.asList().stream()
                      .filter(job -> job.id().application().equals(application.id().instance(instance)))
                      .collect(Collectors.toUnmodifiableMap(job -> job.id().type(),
                                                            job -> job));
    }

    /** Filterable job status lists for each instance of this application. */
    public Map<ApplicationId, JobList> instanceJobs() {
        return allJobs.asList().stream()
                      .collect(groupingBy(job -> job.id().application(),
                                          collectingAndThen(toUnmodifiableList(), JobList::from)));
    }

    /**
     * The set of jobs that need to run for the changes of each instance of the application to be considered complete,
     * and any test jobs for any outstanding change, which will likely be needed to later deploy this change.
     */
    public Map<JobId, List<Versions>> jobsToRun() {
        Map<InstanceName, Change> changes = new LinkedHashMap<>();
        for (InstanceName instance : application.deploymentSpec().instanceNames())
            changes.put(instance, application.require(instance).change());
        Map<JobId, List<Versions>> jobs = jobsToRun(changes);

        // Add test jobs for the outstanding change stacked on top of the current change, eagerly assuming upgrades succeed.
        for (InstanceName instance : application.deploymentSpec().instanceNames())
            changes.put(instance, outstandingChange(instance).onTopOf(application.require(instance).change()));
        var testJobs = jobsToRun(changes, true).entrySet().stream()
                                               .filter(entry -> ! entry.getKey().type().isProduction());

        return Stream.concat(jobs.entrySet().stream(), testJobs)
                     .collect(collectingAndThen(toMap(Map.Entry::getKey,
                                                      Map.Entry::getValue,
                                                      DeploymentStatus::union,
                                                      LinkedHashMap::new),
                                                ImmutableMap::copyOf));
    }

    /** Computes the jobs to run for the given changes; with eagerTests, upgrades are assumed to succeed. */
    private Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) {
        Map<JobId, Versions> productionJobs = new LinkedHashMap<>();
        changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests)));
        Map<JobId, List<Versions>> testJobs = testJobs(productionJobs);
        Map<JobId, List<Versions>> jobs = new LinkedHashMap<>(testJobs);
        productionJobs.forEach((job, versions) -> jobs.put(job, List.of(versions)));
        // Add declared test jobs which have an incomplete change, but which are not yet covered above.
        jobSteps.forEach((job, step) -> {
            if ( ! step.isDeclared() || jobs.containsKey(job))
                return;

            Change change = changes.get(job.application().instance());
            if (change == null || ! change.hasTargets())
                return;

            Optional<JobId> firstProductionJobWithDeployment = jobSteps.keySet().stream()
                                                                       .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment())
                                                                       .filter(jobId -> deploymentFor(jobId).isPresent())
                                                                       .findFirst();

            Versions versions = Versions.from(change, application, firstProductionJobWithDeployment.flatMap(this::deploymentFor), systemVersion);
            if (step.completedAt(change, firstProductionJobWithDeployment).isEmpty())
                jobs.merge(job, List.of(versions), DeploymentStatus::union);
        });
        return ImmutableMap.copyOf(jobs);
    }

    /** The set of jobs that need to run for the given changes to be considered complete. */
    public Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes) {
        return jobsToRun(changes, false);
    }

    /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. */
    public Map<JobId, StepStatus> jobSteps() { return jobSteps; }

    /** The status of the instance step for each instance of this application, in declaration order. */
    public Map<InstanceName, StepStatus> instanceSteps() {
        ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder();
        for (StepStatus status : allSteps)
            if (status instanceof InstanceStatus)
                instances.put(status.instance(), status);
        return instances.build();
    }

    /** The deployment for the given job, if the job's instance has a deployment in the job's zone. */
    public Optional<Deployment> deploymentFor(JobId job) {
        return Optional.ofNullable(application.require(job.application().instance())
                                              .deployments().get(job.type().zone(system)));
    }

    /**
     * The change of this application's latest submission, if this upgrades any of its production deployments,
     * and has not yet started rolling out, due to some other change or a block window being present at the time of submission.
     */
    public Change outstandingChange(InstanceName instance) {
        return application.latestVersion().map(Change::of)
                          .filter(change -> application.require(instance).change().application().map(change::upgrades).orElse(true))
                          .filter(change -> ! jobsToRun(Map.of(instance, change)).isEmpty())
                          .orElse(Change.empty());
    }

    /**
     * True if the job has already been triggered on the given versions, or if all test types (systemTest, stagingTest),
     * restricted to the job's instance if declared in that instance, have successful runs on the given versions.
     */
    public boolean isTested(JobId job, Change change) {
        Versions versions = Versions.from(change, application, deploymentFor(job), systemVersion);
        return    allJobs.triggeredOn(versions).get(job).isPresent()
               || Stream.of(systemTest, stagingTest)
                        .noneMatch(testType -> declaredTest(job.application(), testType).map(__ -> allJobs.instance(job.application().instance()))
                                                                                        .orElse(allJobs)
                                                                                        .type(testType)
                                                                                        .successOn(versions).isEmpty());
    }

    /** The production jobs of the instance which are not yet complete for the change; with assumeUpgradesSucceed, deployments are treated as already on the change's platform. */
    private Map<JobId, Versions> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) {
        ImmutableMap.Builder<JobId, Versions> jobs = ImmutableMap.builder();
        jobSteps.forEach((job, step) -> {
            Optional<Deployment> deployment = deploymentFor(job)
                    .map(existing -> assumeUpgradesSucceed ? new Deployment(existing.zone(), existing.applicationVersion(), change.platform().orElse(existing.version()), existing.at(), existing.metrics(), existing.activity(), existing.quota())
                                                           : existing);
            if (   job.application().instance().equals(instance)
                && job.type().isProduction()
                && step.completedAt(change).isEmpty())
                jobs.put(job, Versions.from(change, application, deployment, systemVersion));
        });
        return jobs.build();
    }

    /** The production jobs that need to run to complete roll-out of the given change to production. */
    public Map<JobId, Versions> productionJobs(InstanceName instance, Change change) {
        return productionJobs(instance, change, false);
    }

    /** The test jobs that need to run prior to the given production deployment jobs. */
    public Map<JobId, List<Versions>> testJobs(Map<JobId, Versions> jobs) {
        Map<JobId, List<Versions>> testJobs = new LinkedHashMap<>();
        for (JobType testType : List.of(systemTest, stagingTest)) {
            // First pass: declared tests in the same instance as each production deployment.
            jobs.forEach((job, versions) -> {
                if (job.type().isProduction() && job.type().isDeployment()) {
                    declaredTest(job.application(), testType).ifPresent(testJob -> {
                        if (allJobs.successOn(versions).get(testJob).isEmpty())
                            testJobs.merge(testJob, List.of(versions), DeploymentStatus::union);
                    });
                }
            });
            // Second pass: fall back to the first declared-or-implicit test for versions no test yet covers.
            jobs.forEach((job, versions) -> {
                if (   job.type().isProduction() && job.type().isDeployment()
                    && allJobs.successOn(versions).type(testType).isEmpty()
                    && testJobs.keySet().stream()
                               .noneMatch(test ->    test.type() == testType
                                                  && testJobs.get(test).contains(versions)))
                    testJobs.merge(firstDeclaredOrElseImplicitTest(testType), List.of(versions), DeploymentStatus::union);
            });
        }
        return ImmutableMap.copyOf(testJobs);
    }

    /** The first declared test of the given type across instances, or, if none is declared, the first implicit one. */
    private JobId firstDeclaredOrElseImplicitTest(JobType testJob) {
        return application.deploymentSpec().instanceNames().stream()
                          .map(name -> new JobId(application.id().instance(name), testJob))
                          .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow();
    }

    /** JobId of any declared test of the given type, for the given instance. */
    private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) {
        JobId jobId = new JobId(instanceId, testJob);
        return jobSteps.get(jobId).isDeclared() ? Optional.of(jobId) : Optional.empty();
    }

    /** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. */
    private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps) {
        if (DeploymentSpec.empty.equals(spec))
            return Map.of();

        Map<JobId, StepStatus> dependencies = new LinkedHashMap<>();
        List<StepStatus> previous = List.of();
        for (DeploymentSpec.Step step : spec.steps())
            previous = fillStep(dependencies, allSteps, step, previous, null);

        return ImmutableMap.copyOf(dependencies);
    }

    /** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */
    private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps,
                                      DeploymentSpec.Step step, List<StepStatus> previous, InstanceName instance) {
        if (step.steps().isEmpty()) {
            // A primitive step outside any instance (e.g., a top-level delay) carries no job; pass dependencies through.
            if (instance == null)
                return previous;

            if ( ! step.delay().isZero()) {
                StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance);
                allSteps.add(stepStatus);
                return List.of(stepStatus);
            }

            JobType jobType;
            StepStatus stepStatus;
            if (step.concerns(test) || step.concerns(staging)) {
                jobType = JobType.from(system, ((DeclaredZone) step).environment(), null)
                                 .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, instance, jobType, true);
                // Declared tests run in parallel with what preceded them; extend, rather than replace, the previous steps.
                previous = new ArrayList<>(previous);
                previous.add(stepStatus);
            }
            else if (step.isTest()) {
                jobType = JobType.testFrom(system, ((DeclaredTest) step).region())
                                 .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                JobType preType = JobType.from(system, prod, ((DeclaredTest) step).region())
                                         .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, instance, jobType, preType);
                previous = List.of(stepStatus);
            }
            else if (step.concerns(prod)) {
                jobType = JobType.from(system, ((DeclaredZone) step).environment(), ((DeclaredZone) step).region().get())
                                 .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system));
                stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, instance, jobType);
                previous = List.of(stepStatus);
            }
            else
                return previous; // Step not relevant in this system.

            JobId jobId = new JobId(application.id().instance(instance), jobType);
            // A declared step replaces any implicit one previously added for the same job.
            allSteps.removeIf(existing -> existing.job().equals(Optional.of(jobId)));
            allSteps.add(stepStatus);
            dependencies.put(jobId, stepStatus);
            return previous;
        }

        if (step instanceof DeploymentInstanceSpec) {
            DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step);
            StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this);
            instance = spec.name();
            allSteps.add(instanceStatus);
            previous = List.of(instanceStatus);
            // Ensure each instance has system and staging test steps, adding implicit ones where not declared.
            for (JobType test : List.of(systemTest, stagingTest)) {
                JobId job = new JobId(application.id().instance(instance), test);
                if ( ! dependencies.containsKey(job)) {
                    var testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test.environment()), List.of(),
                                                                    this, job.application().instance(), test, false);
                    dependencies.put(job, testStatus);
                    allSteps.add(testStatus);
                }
            }
        }

        if (step.isOrdered()) {
            for (DeploymentSpec.Step nested : step.steps())
                previous = fillStep(dependencies, allSteps, nested, previous, instance);

            return previous;
        }

        // Parallel steps all depend on the same previous steps, and together form the next dependency frontier.
        List<StepStatus> parallel = new ArrayList<>();
        for (DeploymentSpec.Step nested : step.steps())
            parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance));

        return List.copyOf(parallel);
    }

    public enum StepType {

        /** An instance — completion marks a change as ready for the jobs contained in it. */
        instance,

        /** A timed delay. */
        delay,

        /** A system, staging or production test. */
        test,

        /** A production deployment. */
        deployment,

    }

    /**
     * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change.
     *
     * Each node contains a step describing the node,
     * a list of steps which need to be complete before the step may start,
     * a list of jobs from which completion of the step is computed, and
     * optionally, an instance name used to identify a job type for the step,
     *
     * The completion criterion for each type of step is implemented in subclasses of this.
     */
    public static abstract class StepStatus {

        private final StepType type;
        private final DeploymentSpec.Step step;
        private final List<StepStatus> dependencies;
        private final InstanceName instance;

        private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) {
            this.type = requireNonNull(type);
            this.step = requireNonNull(step);
            this.dependencies = List.copyOf(dependencies);
            this.instance = instance;
        }

        /** The type of step this is. */
        public final StepType type() { return type; }

        /** The step defining this. */
        public final DeploymentSpec.Step step() { return step; }

        /** The list of steps that need to be complete before this may start. */
        public final List<StepStatus> dependencies() { return dependencies; }

        /** The instance of this. */
        public final InstanceName instance() { return instance; }

        /** The id of the job this corresponds to, if any. */
        public Optional<JobId> job() { return Optional.empty(); }

        /** The time at which this is, or was, complete on the given change and / or versions. */
        public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); }

        /** The time at which this is, or was, complete on the given change and / or versions. */
        abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent);

        /** The time at which this step is ready to run the specified change and / or versions. */
        public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); }

        /** The time at which this step is ready to run the specified change and / or versions. */
        Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
            return dependenciesCompletedAt(change, dependent)
                    .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change))
                                        .flatMap(Optional::stream)
                                        .reduce(ready, maxBy(naturalOrder())));
        }

        /** The time at which all dependencies completed on the given change and / or versions. */
        Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) {
            return dependencies.stream().allMatch(step -> step.completedAt(change, dependent).isPresent())
                   ? dependencies.stream().map(step -> step.completedAt(change, dependent).get())
                                 .max(naturalOrder())
                                 .or(() -> Optional.of(Instant.EPOCH))
                   : Optional.empty();
        }

        /** The time until which this step is blocked by a change blocker. */
        public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); }

        /** The time until which this step is paused by user intervention. */
        public Optional<Instant> pausedUntil() { return Optional.empty(); }

        /** The time until which this step is cooling down, due to consecutive failures. */
        public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); }

        /** Whether this step is declared in the deployment spec, or is an implicit step. */
        public boolean isDeclared() { return true; }

    }

    private static class DelayStatus extends StepStatus {

        private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) {
            super(StepType.delay, step, dependencies, instance);
        }

        /** Complete the delay duration after the step became ready. */
        @Override
        public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
            return readyAt(change, dependent).map(completion -> completion.plus(step().delay()));
        }

    }

    private static class InstanceStatus extends StepStatus {

        private final DeploymentInstanceSpec spec;
        private final Instant now;
        private final Instance instance;
        private final DeploymentStatus status;

        private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now,
                               Instance instance, DeploymentStatus status) {
            super(StepType.instance, spec, dependencies, spec.name());
            this.spec = spec;
            this.now = now;
            this.instance = instance;
            this.status = status;
        }

        /**
         * Time of completion of its dependencies, if all parts of the given change are contained in the change
         * for this instance, or if no more jobs should run for this instance for the given change.
         */
        @Override
        public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
            return    (   (change.platform().isEmpty() || change.platform().equals(instance.change().platform()))
                       && (change.application().isEmpty() || change.application().equals(instance.change().application()))
                   || status.jobsToRun(Map.of(instance.name(), change)).isEmpty())
                   ? dependenciesCompletedAt(change, dependent)
                   : Optional.empty();
        }

        @Override
        public Optional<Instant> blockedUntil(Change change) {
            // Scan hour by hour, at most a week ahead, for the first time not covered by any relevant blocker.
            for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) {
                boolean blocked = false;
                for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) {
                    while (   blocker.window().includes(current)
                           && now.plus(Duration.ofDays(7)).isAfter(current)
                           && (   change.platform().isPresent() && blocker.blocksVersions()
                               || change.application().isPresent() && blocker.blocksRevisions())) {
                        blocked = true;
                        current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS);
                    }
                }
                if ( ! blocked)
                    return current == now ? Optional.empty() : Optional.of(current);
            }
            // Blocked throughout the scanned week: report a far-future instant.
            return Optional.of(now.plusSeconds(1 << 30));
        }

    }

    private static abstract class JobStepStatus extends StepStatus {

        private final JobStatus job;
        private final DeploymentStatus status;

        private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job,
                              DeploymentStatus status) {
            super(type, step, dependencies, job.id().application().instance());
            this.job = requireNonNull(job);
            this.status = requireNonNull(status);
        }

        @Override
        public Optional<JobId> job() { return Optional.of(job.id()); }

        @Override
        public Optional<Instant> pausedUntil() {
            return status.application().require(job.id().application().instance()).jobPause(job.id().type());
        }

        @Override
        public Optional<Instant> coolingDownUntil(Change change) {
            if (job.lastTriggered().isEmpty()) return Optional.empty();
            if (job.lastCompleted().isEmpty()) return Optional.empty();
            if (job.firstFailing().isEmpty()) return Optional.empty();
            Versions lastVersions = job.lastCompleted().get().versions();
            if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty();
            if (change.application().isPresent() && ! change.application().get().equals(lastVersions.targetApplication())) return Optional.empty();
            if (status.application.deploymentSpec().requireInstance(job.id().application().instance()).upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return Optional.empty();
            if (job.id().type().environment().isTest() && job.isOutOfCapacity()) return Optional.empty();

            // Back off for ten minutes plus half the time the job has been failing.
            Instant firstFailing = job.firstFailing().get().end().get();
            Instant lastCompleted = job.lastCompleted().get().end().get();
            return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted)
                                                      : Optional.of(lastCompleted.plus(Duration.ofMinutes(10))
                                                                                 .plus(Duration.between(firstFailing, lastCompleted)
                                                                                               .dividedBy(2)))
                                                        .filter(status.now::isBefore);
        }

        private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                            DeploymentStatus status, InstanceName instance, JobType jobType) {
            ZoneId zone = ZoneId.from(step.environment(), step.region().get());
            JobStatus job = status.instanceJobs(instance).get(jobType);
            Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(instance)
                                                                                .deployments().get(zone));

            return new JobStepStatus(StepType.deployment, step, dependencies, job, status) {
                /** Production deployments are additionally gated on the change having been tested. */
                @Override
                public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
                    return super.readyAt(change, Optional.of(job.id()))
                                .filter(__ -> status.isTested(job.id(), change));
                }

                /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */
                @Override
                public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                    if (     change.isPinned()
                          && change.platform().isPresent()
                          && ! existingDeployment.map(Deployment::version).equals(change.platform()))
                        return Optional.empty();

                    Change fullChange = status.application().require(instance).change();
                    if (existingDeployment.map(deployment ->    ! (change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()))
                                                             &&   (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.applicationVersion())))
                                          .orElse(false))
                        return job.lastCompleted().flatMap(Run::end);

                    return job.lastSuccess()
                              .filter(run ->    change.platform().map(run.versions().targetPlatform()::equals).orElse(true)
                                             && change.application().map(run.versions().targetApplication()::equals).orElse(true))
                              .flatMap(Run::end);
                }
            };
        }

        private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies,
                                                      DeploymentStatus status, InstanceName instance, JobType testType, JobType prodType) {
            JobStatus job = status.instanceJobs(instance).get(testType);
            return new JobStepStatus(StepType.test, step, dependencies, job, status) {
                /** Complete when the test last succeeded on matching versions, after the corresponding production deployment. */
                @Override
                public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                    Versions versions = Versions.from(change, status.application, status.deploymentFor(job.id()), status.systemVersion);
                    return job.lastSuccess()
                              .filter(run -> versions.targetsMatch(run.versions()))
                              .filter(run -> ! status.jobs()
                                                     .instance(instance)
                                                     .type(prodType)
                                                     .lastCompleted().endedNoLaterThan(run.start())
                                                     .isEmpty())
                              .map(run -> run.end().get());
                }
            };
        }

        private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                      DeploymentStatus status, InstanceName instance,
                                                      JobType jobType, boolean declared) {
            JobStatus job = status.instanceJobs(instance).get(jobType);
            return new JobStepStatus(StepType.test, step, dependencies, job, status) {
                /** Complete at the latest end of any successful run on versions matching the change, relative to the dependent job's deployment. */
                @Override
                public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                    return RunList.from(job)
                                  .matching(run -> run.versions().targetsMatch(Versions.from(change,
                                                                                             status.application,
                                                                                             dependent.flatMap(status::deploymentFor),
                                                                                             status.systemVersion)))
                                  .status(RunStatus.success)
                                  .asList().stream()
                                  .map(run -> run.end().get())
                                  .max(naturalOrder());
                }

                @Override
                public boolean isDeclared() { return declared; }
            };
        }

    }

}
class DeploymentStatus { public static List<JobId> jobsFor(Application application, SystemName system) { if (DeploymentSpec.empty.equals(application.deploymentSpec())) return List.of(); return application.deploymentSpec().instances().stream() .flatMap(spec -> Stream.concat(Stream.of(systemTest, stagingTest), flatten(spec).filter(step -> step.concerns(prod)) .map(step -> { if (step instanceof DeclaredZone) return JobType.from(system, prod, ((DeclaredZone) step).region().get()); return JobType.testFrom(system, ((DeclaredTest) step).region()); }) .flatMap(Optional::stream)) .map(type -> new JobId(application.id().instance(spec.name()), type))) .collect(toUnmodifiableList()); } private static Stream<DeploymentSpec.Step> flatten(DeploymentSpec.Step step) { return step instanceof DeploymentSpec.Steps ? step.steps().stream().flatMap(DeploymentStatus::flatten) : Stream.of(step); } private static <T> List<T> union(List<T> first, List<T> second) { return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList()); } private final Application application; private final JobList allJobs; private final SystemName system; private final Version systemVersion; private final Instant now; private final Map<JobId, StepStatus> jobSteps; private final List<StepStatus> allSteps; public DeploymentStatus(Application application, Map<JobId, JobStatus> allJobs, SystemName system, Version systemVersion, Instant now) { this.application = requireNonNull(application); this.allJobs = JobList.from(allJobs.values()); this.system = requireNonNull(system); this.systemVersion = requireNonNull(systemVersion); this.now = requireNonNull(now); List<StepStatus> allSteps = new ArrayList<>(); this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps); this.allSteps = List.copyOf(allSteps); } /** The application this deployment status concerns. 
*/ public Application application() { return application; } /** A filterable list of the status of all jobs for this application. */ public JobList jobs() { return allJobs; } /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */ public boolean hasFailures() { return ! allJobs.failing() .not().withStatus(RunStatus.outOfCapacity) .isEmpty(); } /** All job statuses, by job type, for the given instance. */ public Map<JobType, JobStatus> instanceJobs(InstanceName instance) { return allJobs.asList().stream() .filter(job -> job.id().application().equals(application.id().instance(instance))) .collect(Collectors.toUnmodifiableMap(job -> job.id().type(), job -> job)); } /** Filterable job status lists for each instance of this application. */ public Map<ApplicationId, JobList> instanceJobs() { return allJobs.asList().stream() .collect(groupingBy(job -> job.id().application(), collectingAndThen(toUnmodifiableList(), JobList::from))); } /** * The set of jobs that need to run for the changes of each instance of the application to be considered complete, * and any test jobs for any oustanding change, which will likely be needed to lated deploy this change. */ public Map<JobId, List<Versions>> jobsToRun() { Map<InstanceName, Change> changes = new LinkedHashMap<>(); for (InstanceName instance : application.deploymentSpec().instanceNames()) changes.put(instance, application.require(instance).change()); Map<JobId, List<Versions>> jobs = jobsToRun(changes); for (InstanceName instance : application.deploymentSpec().instanceNames()) changes.put(instance, outstandingChange(instance).onTopOf(application.require(instance).change())); var testJobs = jobsToRun(changes, true).entrySet().stream() .filter(entry -> ! 
entry.getKey().type().isProduction()); return Stream.concat(jobs.entrySet().stream(), testJobs) .collect(collectingAndThen(toMap(Map.Entry::getKey, Map.Entry::getValue, DeploymentStatus::union, LinkedHashMap::new), ImmutableMap::copyOf)); } private Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) { Map<JobId, Versions> productionJobs = new LinkedHashMap<>(); changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests))); Map<JobId, List<Versions>> testJobs = testJobs(productionJobs); Map<JobId, List<Versions>> jobs = new LinkedHashMap<>(testJobs); productionJobs.forEach((job, versions) -> jobs.put(job, List.of(versions))); jobSteps.forEach((job, step) -> { if ( ! step.isDeclared() || jobs.containsKey(job)) return; Change change = changes.get(job.application().instance()); if (change == null || ! change.hasTargets()) return; Optional<JobId> firstProductionJobWithDeployment = jobSteps.keySet().stream() .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment()) .filter(jobId -> deploymentFor(jobId).isPresent()) .findFirst(); Versions versions = Versions.from(change, application, firstProductionJobWithDeployment.flatMap(this::deploymentFor), systemVersion); if (step.completedAt(change, firstProductionJobWithDeployment).isEmpty()) jobs.merge(job, List.of(versions), DeploymentStatus::union); }); return ImmutableMap.copyOf(jobs); } /** The set of jobs that need to run for the given changes to be considered complete. */ public Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes) { return jobsToRun(changes, false); } /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. 
*/ public Map<JobId, StepStatus> jobSteps() { return jobSteps; } public Map<InstanceName, StepStatus> instanceSteps() { ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder(); for (StepStatus status : allSteps) if (status instanceof InstanceStatus) instances.put(status.instance(), status); return instances.build(); } /** The step status for all relevant steps in the deployment spec of this, in the same order as in the deployment spec. */ public Optional<Deployment> deploymentFor(JobId job) { return Optional.ofNullable(application.require(job.application().instance()) .deployments().get(job.type().zone(system))); } /** * The change of this application's latest submission, if this upgrades any of its production deployments, * and has not yet started rolling out, due to some other change or a block window being present at the time of submission. */ public Change outstandingChange(InstanceName instance) { return application.latestVersion().map(Change::of) .filter(change -> application.require(instance).change().application().map(change::upgrades).orElse(true)) .filter(change -> ! jobsToRun(Map.of(instance, change)).isEmpty()) .orElse(Change.empty()); } /** * True if the job has already been triggered on the given versions, or if all test types (systemTest, stagingTest), * restricted to the job's instance if declared in that instance, have successful runs on the given versions. 
*/ public boolean isTested(JobId job, Change change) { Versions versions = Versions.from(change, application, deploymentFor(job), systemVersion); return allJobs.triggeredOn(versions).get(job).isPresent() || Stream.of(systemTest, stagingTest) .noneMatch(testType -> declaredTest(job.application(), testType).map(__ -> allJobs.instance(job.application().instance())) .orElse(allJobs) .type(testType) .successOn(versions).isEmpty()); } private Map<JobId, Versions> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) { ImmutableMap.Builder<JobId, Versions> jobs = ImmutableMap.builder(); jobSteps.forEach((job, step) -> { Optional<Deployment> deployment = deploymentFor(job) .map(existing -> assumeUpgradesSucceed ? new Deployment(existing.zone(), existing.applicationVersion(), change.platform().orElse(existing.version()), existing.at(), existing.metrics(), existing.activity(), existing.quota()) : existing); if ( job.application().instance().equals(instance) && job.type().isProduction() && step.completedAt(change).isEmpty()) jobs.put(job, Versions.from(change, application, deployment, systemVersion)); }); return jobs.build(); } /** The production jobs that need to run to complete roll-out of the given change to production. */ public Map<JobId, Versions> productionJobs(InstanceName instance, Change change) { return productionJobs(instance, change, false); } /** The test jobs that need to run prior to the given production deployment jobs. 
*/ public Map<JobId, List<Versions>> testJobs(Map<JobId, Versions> jobs) { Map<JobId, List<Versions>> testJobs = new LinkedHashMap<>(); for (JobType testType : List.of(systemTest, stagingTest)) { jobs.forEach((job, versions) -> { if (job.type().isProduction() && job.type().isDeployment()) { declaredTest(job.application(), testType).ifPresent(testJob -> { if (allJobs.successOn(versions).get(testJob).isEmpty()) testJobs.merge(testJob, List.of(versions), DeploymentStatus::union); }); } }); jobs.forEach((job, versions) -> { if ( job.type().isProduction() && job.type().isDeployment() && allJobs.successOn(versions).type(testType).isEmpty() && testJobs.keySet().stream() .noneMatch(test -> test.type() == testType && testJobs.get(test).contains(versions))) testJobs.merge(firstDeclaredOrElseImplicitTest(testType), List.of(versions), DeploymentStatus::union); }); } return ImmutableMap.copyOf(testJobs); } private JobId firstDeclaredOrElseImplicitTest(JobType testJob) { return application.deploymentSpec().instanceNames().stream() .map(name -> new JobId(application.id().instance(name), testJob)) .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow(); } /** JobId of any declared test of the given type, for the given instance. */ private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) { JobId jobId = new JobId(instanceId, testJob); return jobSteps.get(jobId).isDeclared() ? Optional.of(jobId) : Optional.empty(); } /** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. 
*/ private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps) { if (DeploymentSpec.empty.equals(spec)) return Map.of(); Map<JobId, StepStatus> dependencies = new LinkedHashMap<>(); List<StepStatus> previous = List.of(); for (DeploymentSpec.Step step : spec.steps()) previous = fillStep(dependencies, allSteps, step, previous, null); return ImmutableMap.copyOf(dependencies); } /** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */ private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step, List<StepStatus> previous, InstanceName instance) { if (step.steps().isEmpty()) { if (instance == null) return previous; if ( ! step.delay().isZero()) { StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance); allSteps.add(stepStatus); return List.of(stepStatus); } JobType jobType; StepStatus stepStatus; if (step.concerns(test) || step.concerns(staging)) { jobType = JobType.from(system, ((DeclaredZone) step).environment(), null) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, instance, jobType, true); previous = new ArrayList<>(previous); previous.add(stepStatus); } else if (step.isTest()) { jobType = JobType.testFrom(system, ((DeclaredTest) step).region()) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); JobType preType = JobType.from(system, prod, ((DeclaredTest) step).region()) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, instance, jobType, preType); previous = List.of(stepStatus); } else if 
(step.concerns(prod)) { jobType = JobType.from(system, ((DeclaredZone) step).environment(), ((DeclaredZone) step).region().get()) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, instance, jobType); previous = List.of(stepStatus); } else return previous; JobId jobId = new JobId(application.id().instance(instance), jobType); allSteps.removeIf(existing -> existing.job().equals(Optional.of(jobId))); allSteps.add(stepStatus); dependencies.put(jobId, stepStatus); return previous; } if (step instanceof DeploymentInstanceSpec) { DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step); StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this); instance = spec.name(); allSteps.add(instanceStatus); previous = List.of(instanceStatus); for (JobType test : List.of(systemTest, stagingTest)) { JobId job = new JobId(application.id().instance(instance), test); if ( ! dependencies.containsKey(job)) { var testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test.environment()), List.of(), this, job.application().instance(), test, false); dependencies.put(job, testStatus); allSteps.add(testStatus); } } } if (step.isOrdered()) { for (DeploymentSpec.Step nested : step.steps()) previous = fillStep(dependencies, allSteps, nested, previous, instance); return previous; } List<StepStatus> parallel = new ArrayList<>(); for (DeploymentSpec.Step nested : step.steps()) parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance)); return List.copyOf(parallel); } public enum StepType { /** An instance — completion marks a change as ready for the jobs contained in it. */ instance, /** A timed delay. */ delay, /** A system, staging or production test. */ test, /** A production deployment. 
*/ deployment, } /** * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change. * * Each node contains a step describing the node, * a list of steps which need to be complete before the step may start, * a list of jobs from which completion of the step is computed, and * optionally, an instance name used to identify a job type for the step, * * The completion criterion for each type of step is implemented in subclasses of this. */ public static abstract class StepStatus { private final StepType type; private final DeploymentSpec.Step step; private final List<StepStatus> dependencies; private final InstanceName instance; private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) { this.type = requireNonNull(type); this.step = requireNonNull(step); this.dependencies = List.copyOf(dependencies); this.instance = instance; } /** The type of step this is. */ public final StepType type() { return type; } /** The step defining this. */ public final DeploymentSpec.Step step() { return step; } /** The list of steps that need to be complete before this may start. */ public final List<StepStatus> dependencies() { return dependencies; } /** The instance of this. */ public final InstanceName instance() { return instance; } /** The id of the job this corresponds to, if any. */ public Optional<JobId> job() { return Optional.empty(); } /** The time at which this is, or was, complete on the given change and / or versions. */ public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); } /** The time at which this is, or was, complete on the given change and / or versions. */ abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent); /** The time at which this step is ready to run the specified change and / or versions. 
*/ public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); } /** The time at which this step is ready to run the specified change and / or versions. */ Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return dependenciesCompletedAt(change, dependent) .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change)) .flatMap(Optional::stream) .reduce(ready, maxBy(naturalOrder()))); } /** The time at which all dependencies completed on the given change and / or versions. */ Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) { return dependencies.stream().allMatch(step -> step.completedAt(change, dependent).isPresent()) ? dependencies.stream().map(step -> step.completedAt(change, dependent).get()) .max(naturalOrder()) .or(() -> Optional.of(Instant.EPOCH)) : Optional.empty(); } /** The time until which this step is blocked by a change blocker. */ public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); } /** The time until which this step is paused by user intervention. */ public Optional<Instant> pausedUntil() { return Optional.empty(); } /** The time until which this step is cooling down, due to consecutive failures. */ public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); } /** Whether this step is declared in the deployment spec, or is an implicit step. 
*/ public boolean isDeclared() { return true; } } private static class DelayStatus extends StepStatus { private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) { super(StepType.delay, step, dependencies, instance); } @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return readyAt(change, dependent).map(completion -> completion.plus(step().delay())); } } private static class InstanceStatus extends StepStatus { private final DeploymentInstanceSpec spec; private final Instant now; private final Instance instance; private final DeploymentStatus status; private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now, Instance instance, DeploymentStatus status) { super(StepType.instance, spec, dependencies, spec.name()); this.spec = spec; this.now = now; this.instance = instance; this.status = status; } /** * Time of completion of its dependencies, if all parts of the given change are contained in the change * for this instance, or if no more jobs should run for this instance for the given change. */ @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return ( (change.platform().isEmpty() || change.platform().equals(instance.change().platform())) && (change.application().isEmpty() || change.application().equals(instance.change().application())) || status.jobsToRun(Map.of(instance.name(), change)).isEmpty()) ? 
dependenciesCompletedAt(change, dependent) : Optional.empty(); } @Override public Optional<Instant> blockedUntil(Change change) { for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) { boolean blocked = false; for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) { while ( blocker.window().includes(current) && now.plus(Duration.ofDays(7)).isAfter(current) && ( change.platform().isPresent() && blocker.blocksVersions() || change.application().isPresent() && blocker.blocksRevisions())) { blocked = true; current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS); } } if ( ! blocked) return current == now ? Optional.empty() : Optional.of(current); } return Optional.of(now.plusSeconds(1 << 30)); } } private static abstract class JobStepStatus extends StepStatus { private final JobStatus job; private final DeploymentStatus status; private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job, DeploymentStatus status) { super(type, step, dependencies, job.id().application().instance()); this.job = requireNonNull(job); this.status = requireNonNull(status); } @Override public Optional<JobId> job() { return Optional.of(job.id()); } @Override public Optional<Instant> pausedUntil() { return status.application().require(job.id().application().instance()).jobPause(job.id().type()); } @Override public Optional<Instant> coolingDownUntil(Change change) { if (job.lastTriggered().isEmpty()) return Optional.empty(); if (job.lastCompleted().isEmpty()) return Optional.empty(); if (job.firstFailing().isEmpty()) return Optional.empty(); Versions lastVersions = job.lastCompleted().get().versions(); if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty(); if (change.application().isPresent() && ! 
change.application().get().equals(lastVersions.targetApplication())) return Optional.empty(); if (status.application.deploymentSpec().requireInstance(job.id().application().instance()).upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return Optional.empty(); if (job.id().type().environment().isTest() && job.isOutOfCapacity()) return Optional.empty(); Instant firstFailing = job.firstFailing().get().end().get(); Instant lastCompleted = job.lastCompleted().get().end().get(); return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted) : Optional.of(lastCompleted.plus(Duration.ofMinutes(10)) .plus(Duration.between(firstFailing, lastCompleted) .dividedBy(2))) .filter(status.now::isBefore); } private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, InstanceName instance, JobType jobType) { ZoneId zone = ZoneId.from(step.environment(), step.region().get()); JobStatus job = status.instanceJobs(instance).get(jobType); Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(instance) .deployments().get(zone)); return new JobStepStatus(StepType.deployment, step, dependencies, job, status) { @Override public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return super.readyAt(change, Optional.of(job.id())) .filter(__ -> status.isTested(job.id(), change)); } /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */ @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { if ( change.isPinned() && change.platform().isPresent() && ! existingDeployment.map(Deployment::version).equals(change.platform())) return Optional.empty(); Change fullChange = status.application().require(instance).change(); if (existingDeployment.map(deployment -> ! 
(change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion())) && (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.applicationVersion()))) .orElse(false)) return job.lastCompleted().flatMap(Run::end); return job.lastSuccess() .filter(run -> change.platform().map(run.versions().targetPlatform()::equals).orElse(true) && change.application().map(run.versions().targetApplication()::equals).orElse(true)) .flatMap(Run::end); } }; } private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies, DeploymentStatus status, InstanceName instance, JobType testType, JobType prodType) { JobStatus job = status.instanceJobs(instance).get(testType); return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { Versions versions = Versions.from(change, status.application, status.deploymentFor(job.id()), status.systemVersion); return job.lastSuccess() .filter(run -> versions.targetsMatch(run.versions())) .filter(run -> ! 
status.jobs() .instance(instance) .type(prodType) .lastCompleted().endedNoLaterThan(run.start()) .isEmpty()) .map(run -> run.end().get()); } }; } private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, InstanceName instance, JobType jobType, boolean declared) { JobStatus job = status.instanceJobs(instance).get(jobType); return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return RunList.from(job) .matching(run -> run.versions().targetsMatch(Versions.from(change, status.application, dependent.flatMap(status::deploymentFor), status.systemVersion))) .status(RunStatus.success) .asList().stream() .map(run -> run.end().get()) .max(naturalOrder()); } @Override public boolean isDeclared() { return declared; } }; } } }
When there aren't any explicit ones.
public List<StepStatus> allSteps() { List<JobId> firstTestJobs = List.of(firstDeclaredOrElseImplicitTest(systemTest), firstDeclaredOrElseImplicitTest(stagingTest)); return allSteps.stream() .filter(step -> step.isDeclared() || firstTestJobs.contains(step.job().orElseThrow())) .collect(toUnmodifiableList()); }
List<JobId> firstTestJobs = List.of(firstDeclaredOrElseImplicitTest(systemTest),
public List<StepStatus> allSteps() { List<JobId> firstTestJobs = List.of(firstDeclaredOrElseImplicitTest(systemTest), firstDeclaredOrElseImplicitTest(stagingTest)); return allSteps.stream() .filter(step -> step.isDeclared() || firstTestJobs.contains(step.job().orElseThrow())) .collect(toUnmodifiableList()); }
class DeploymentStatus { public static List<JobId> jobsFor(Application application, SystemName system) { if (DeploymentSpec.empty.equals(application.deploymentSpec())) return List.of(); return application.deploymentSpec().instances().stream() .flatMap(spec -> Stream.concat(Stream.of(systemTest, stagingTest), flatten(spec).filter(step -> step.concerns(prod)) .map(step -> { if (step instanceof DeclaredZone) return JobType.from(system, prod, ((DeclaredZone) step).region().get()); return JobType.testFrom(system, ((DeclaredTest) step).region()); }) .flatMap(Optional::stream)) .map(type -> new JobId(application.id().instance(spec.name()), type))) .collect(toUnmodifiableList()); } private static Stream<DeploymentSpec.Step> flatten(DeploymentSpec.Step step) { return step instanceof DeploymentSpec.Steps ? step.steps().stream().flatMap(DeploymentStatus::flatten) : Stream.of(step); } private static <T> List<T> union(List<T> first, List<T> second) { return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList()); } private final Application application; private final JobList allJobs; private final SystemName system; private final Version systemVersion; private final Instant now; private final Map<JobId, StepStatus> jobSteps; private final List<StepStatus> allSteps; public DeploymentStatus(Application application, Map<JobId, JobStatus> allJobs, SystemName system, Version systemVersion, Instant now) { this.application = requireNonNull(application); this.allJobs = JobList.from(allJobs.values()); this.system = requireNonNull(system); this.systemVersion = requireNonNull(systemVersion); this.now = requireNonNull(now); List<StepStatus> allSteps = new ArrayList<>(); this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps); this.allSteps = List.copyOf(allSteps); } /** The application this deployment status concerns. 
*/ public Application application() { return application; } /** A filterable list of the status of all jobs for this application. */ public JobList jobs() { return allJobs; } /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */ public boolean hasFailures() { return ! allJobs.failing() .not().withStatus(RunStatus.outOfCapacity) .isEmpty(); } /** All job statuses, by job type, for the given instance. */ public Map<JobType, JobStatus> instanceJobs(InstanceName instance) { return allJobs.asList().stream() .filter(job -> job.id().application().equals(application.id().instance(instance))) .collect(Collectors.toUnmodifiableMap(job -> job.id().type(), job -> job)); } /** Filterable job status lists for each instance of this application. */ public Map<ApplicationId, JobList> instanceJobs() { return allJobs.asList().stream() .collect(groupingBy(job -> job.id().application(), collectingAndThen(toUnmodifiableList(), JobList::from))); } /** * The set of jobs that need to run for the changes of each instance of the application to be considered complete, * and any test jobs for any oustanding change, which will likely be needed to lated deploy this change. */ public Map<JobId, List<Versions>> jobsToRun() { Map<InstanceName, Change> changes = new LinkedHashMap<>(); for (InstanceName instance : application.deploymentSpec().instanceNames()) changes.put(instance, application.require(instance).change()); Map<JobId, List<Versions>> jobs = jobsToRun(changes); for (InstanceName instance : application.deploymentSpec().instanceNames()) changes.put(instance, outstandingChange(instance).onTopOf(application.require(instance).change())); var testJobs = jobsToRun(changes, true).entrySet().stream() .filter(entry -> ! 
entry.getKey().type().isProduction()); return Stream.concat(jobs.entrySet().stream(), testJobs) .collect(collectingAndThen(toMap(Map.Entry::getKey, Map.Entry::getValue, DeploymentStatus::union, LinkedHashMap::new), ImmutableMap::copyOf)); } private Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) { Map<JobId, Versions> productionJobs = new LinkedHashMap<>(); changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests))); Map<JobId, List<Versions>> testJobs = testJobs(productionJobs); Map<JobId, List<Versions>> jobs = new LinkedHashMap<>(testJobs); productionJobs.forEach((job, versions) -> jobs.put(job, List.of(versions))); jobSteps.forEach((job, step) -> { if ( ! step.isDeclared() || jobs.containsKey(job)) return; Change change = changes.get(job.application().instance()); if (change == null || ! change.hasTargets()) return; Optional<JobId> firstProductionJobWithDeployment = jobSteps.keySet().stream() .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment()) .filter(jobId -> deploymentFor(jobId).isPresent()) .findFirst(); Versions versions = Versions.from(change, application, firstProductionJobWithDeployment.flatMap(this::deploymentFor), systemVersion); if (step.completedAt(change, firstProductionJobWithDeployment).isEmpty()) jobs.merge(job, List.of(versions), DeploymentStatus::union); }); return ImmutableMap.copyOf(jobs); } /** The set of jobs that need to run for the given changes to be considered complete. */ public Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes) { return jobsToRun(changes, false); } /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. 
*/ public Map<JobId, StepStatus> jobSteps() { return jobSteps; } public Map<InstanceName, StepStatus> instanceSteps() { ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder(); for (StepStatus status : allSteps) if (status instanceof InstanceStatus) instances.put(status.instance(), status); return instances.build(); } /** The step status for all relevant steps in the deployment spec of this, in the same order as in the deployment spec. */ public Optional<Deployment> deploymentFor(JobId job) { return Optional.ofNullable(application.require(job.application().instance()) .deployments().get(job.type().zone(system))); } /** * The change of this application's latest submission, if this upgrades any of its production deployments, * and has not yet started rolling out, due to some other change or a block window being present at the time of submission. */ public Change outstandingChange(InstanceName instance) { return application.latestVersion().map(Change::of) .filter(change -> application.require(instance).change().application().map(change::upgrades).orElse(true)) .filter(change -> ! jobsToRun(Map.of(instance, change)).isEmpty()) .orElse(Change.empty()); } /** * True if the job has already been triggered on the given versions, or if all test types (systemTest, stagingTest), * restricted to the job's instance if declared in that instance, have successful runs on the given versions. 
*/ public boolean isTested(JobId job, Change change) { Versions versions = Versions.from(change, application, deploymentFor(job), systemVersion); return allJobs.triggeredOn(versions).get(job).isPresent() || Stream.of(systemTest, stagingTest) .noneMatch(testType -> declaredTest(job.application(), testType).map(__ -> allJobs.instance(job.application().instance())) .orElse(allJobs) .type(testType) .successOn(versions).isEmpty()); } private Map<JobId, Versions> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) { ImmutableMap.Builder<JobId, Versions> jobs = ImmutableMap.builder(); jobSteps.forEach((job, step) -> { Optional<Deployment> deployment = deploymentFor(job) .map(existing -> assumeUpgradesSucceed ? new Deployment(existing.zone(), existing.applicationVersion(), change.platform().orElse(existing.version()), existing.at(), existing.metrics(), existing.activity(), existing.quota()) : existing); if ( job.application().instance().equals(instance) && job.type().isProduction() && step.completedAt(change).isEmpty()) jobs.put(job, Versions.from(change, application, deployment, systemVersion)); }); return jobs.build(); } /** The production jobs that need to run to complete roll-out of the given change to production. */ public Map<JobId, Versions> productionJobs(InstanceName instance, Change change) { return productionJobs(instance, change, false); } /** The test jobs that need to run prior to the given production deployment jobs. 
*/ public Map<JobId, List<Versions>> testJobs(Map<JobId, Versions> jobs) { Map<JobId, List<Versions>> testJobs = new LinkedHashMap<>(); for (JobType testType : List.of(systemTest, stagingTest)) { jobs.forEach((job, versions) -> { if (job.type().isProduction() && job.type().isDeployment()) { declaredTest(job.application(), testType).ifPresent(testJob -> { if (allJobs.successOn(versions).get(testJob).isEmpty()) testJobs.merge(testJob, List.of(versions), DeploymentStatus::union); }); } }); jobs.forEach((job, versions) -> { if ( job.type().isProduction() && job.type().isDeployment() && allJobs.successOn(versions).type(testType).isEmpty() && testJobs.keySet().stream() .noneMatch(test -> test.type() == testType && testJobs.get(test).contains(versions))) testJobs.merge(firstDeclaredOrElseImplicitTest(testType), List.of(versions), DeploymentStatus::union); }); } return ImmutableMap.copyOf(testJobs); } private JobId firstDeclaredOrElseImplicitTest(JobType testJob) { return application.deploymentSpec().instanceNames().stream() .map(name -> new JobId(application.id().instance(name), testJob)) .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow(); } /** JobId of any declared test of the given type, for the given instance. */ private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) { JobId jobId = new JobId(instanceId, testJob); return jobSteps.get(jobId).isDeclared() ? Optional.of(jobId) : Optional.empty(); } /** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. 
*/ private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps) { if (DeploymentSpec.empty.equals(spec)) return Map.of(); Map<JobId, StepStatus> dependencies = new LinkedHashMap<>(); List<StepStatus> previous = List.of(); for (DeploymentSpec.Step step : spec.steps()) previous = fillStep(dependencies, allSteps, step, previous, null); return ImmutableMap.copyOf(dependencies); } /** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */ private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step, List<StepStatus> previous, InstanceName instance) { if (step.steps().isEmpty()) { if (instance == null) return previous; if ( ! step.delay().isZero()) { StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance); allSteps.add(stepStatus); return List.of(stepStatus); } JobType jobType; StepStatus stepStatus; if (step.concerns(test) || step.concerns(staging)) { jobType = JobType.from(system, ((DeclaredZone) step).environment(), null) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, instance, jobType, true); previous = new ArrayList<>(previous); previous.add(stepStatus); } else if (step.isTest()) { jobType = JobType.testFrom(system, ((DeclaredTest) step).region()) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); JobType preType = JobType.from(system, prod, ((DeclaredTest) step).region()) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, instance, jobType, preType); previous = List.of(stepStatus); } else if 
(step.concerns(prod)) { jobType = JobType.from(system, ((DeclaredZone) step).environment(), ((DeclaredZone) step).region().get()) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, instance, jobType); previous = List.of(stepStatus); } else return previous; JobId jobId = new JobId(application.id().instance(instance), jobType); allSteps.removeIf(existing -> existing.job().equals(Optional.of(jobId))); allSteps.add(stepStatus); dependencies.put(jobId, stepStatus); return previous; } if (step instanceof DeploymentInstanceSpec) { DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step); StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this); instance = spec.name(); allSteps.add(instanceStatus); previous = List.of(instanceStatus); for (JobType test : List.of(systemTest, stagingTest)) { JobId job = new JobId(application.id().instance(instance), test); if ( ! dependencies.containsKey(job)) { var testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test.environment()), List.of(), this, job.application().instance(), test, false); dependencies.put(job, testStatus); allSteps.add(testStatus); } } } if (step.isOrdered()) { for (DeploymentSpec.Step nested : step.steps()) previous = fillStep(dependencies, allSteps, nested, previous, instance); return previous; } List<StepStatus> parallel = new ArrayList<>(); for (DeploymentSpec.Step nested : step.steps()) parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance)); return List.copyOf(parallel); } public enum StepType { /** An instance — completion marks a change as ready for the jobs contained in it. */ instance, /** A timed delay. */ delay, /** A system, staging or production test. */ test, /** A production deployment. 
*/ deployment, } /** * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change. * * Each node contains a step describing the node, * a list of steps which need to be complete before the step may start, * a list of jobs from which completion of the step is computed, and * optionally, an instance name used to identify a job type for the step, * * The completion criterion for each type of step is implemented in subclasses of this. */ public static abstract class StepStatus { private final StepType type; private final DeploymentSpec.Step step; private final List<StepStatus> dependencies; private final InstanceName instance; private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) { this.type = requireNonNull(type); this.step = requireNonNull(step); this.dependencies = List.copyOf(dependencies); this.instance = instance; } /** The type of step this is. */ public final StepType type() { return type; } /** The step defining this. */ public final DeploymentSpec.Step step() { return step; } /** The list of steps that need to be complete before this may start. */ public final List<StepStatus> dependencies() { return dependencies; } /** The instance of this. */ public final InstanceName instance() { return instance; } /** The id of the job this corresponds to, if any. */ public Optional<JobId> job() { return Optional.empty(); } /** The time at which this is, or was, complete on the given change and / or versions. */ public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); } /** The time at which this is, or was, complete on the given change and / or versions. */ abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent); /** The time at which this step is ready to run the specified change and / or versions. 
*/ public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); } /** The time at which this step is ready to run the specified change and / or versions. */ Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return dependenciesCompletedAt(change, dependent) .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change)) .flatMap(Optional::stream) .reduce(ready, maxBy(naturalOrder()))); } /** The time at which all dependencies completed on the given change and / or versions. */ Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) { return dependencies.stream().allMatch(step -> step.completedAt(change, dependent).isPresent()) ? dependencies.stream().map(step -> step.completedAt(change, dependent).get()) .max(naturalOrder()) .or(() -> Optional.of(Instant.EPOCH)) : Optional.empty(); } /** The time until which this step is blocked by a change blocker. */ public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); } /** The time until which this step is paused by user intervention. */ public Optional<Instant> pausedUntil() { return Optional.empty(); } /** The time until which this step is cooling down, due to consecutive failures. */ public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); } /** Whether this step is declared in the deployment spec, or is an implicit step. 
*/ public boolean isDeclared() { return true; } } private static class DelayStatus extends StepStatus { private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) { super(StepType.delay, step, dependencies, instance); } @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return readyAt(change, dependent).map(completion -> completion.plus(step().delay())); } } private static class InstanceStatus extends StepStatus { private final DeploymentInstanceSpec spec; private final Instant now; private final Instance instance; private final DeploymentStatus status; private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now, Instance instance, DeploymentStatus status) { super(StepType.instance, spec, dependencies, spec.name()); this.spec = spec; this.now = now; this.instance = instance; this.status = status; } /** * Time of completion of its dependencies, if all parts of the given change are contained in the change * for this instance, or if no more jobs should run for this instance for the given change. */ @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return ( (change.platform().isEmpty() || change.platform().equals(instance.change().platform())) && (change.application().isEmpty() || change.application().equals(instance.change().application())) || status.jobsToRun(Map.of(instance.name(), change)).isEmpty()) ? 
dependenciesCompletedAt(change, dependent) : Optional.empty(); } @Override public Optional<Instant> blockedUntil(Change change) { for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) { boolean blocked = false; for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) { while ( blocker.window().includes(current) && now.plus(Duration.ofDays(7)).isAfter(current) && ( change.platform().isPresent() && blocker.blocksVersions() || change.application().isPresent() && blocker.blocksRevisions())) { blocked = true; current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS); } } if ( ! blocked) return current == now ? Optional.empty() : Optional.of(current); } return Optional.of(now.plusSeconds(1 << 30)); } } private static abstract class JobStepStatus extends StepStatus { private final JobStatus job; private final DeploymentStatus status; private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job, DeploymentStatus status) { super(type, step, dependencies, job.id().application().instance()); this.job = requireNonNull(job); this.status = requireNonNull(status); } @Override public Optional<JobId> job() { return Optional.of(job.id()); } @Override public Optional<Instant> pausedUntil() { return status.application().require(job.id().application().instance()).jobPause(job.id().type()); } @Override public Optional<Instant> coolingDownUntil(Change change) { if (job.lastTriggered().isEmpty()) return Optional.empty(); if (job.lastCompleted().isEmpty()) return Optional.empty(); if (job.firstFailing().isEmpty()) return Optional.empty(); Versions lastVersions = job.lastCompleted().get().versions(); if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty(); if (change.application().isPresent() && ! 
change.application().get().equals(lastVersions.targetApplication())) return Optional.empty(); if (status.application.deploymentSpec().requireInstance(job.id().application().instance()).upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return Optional.empty(); if (job.id().type().environment().isTest() && job.isOutOfCapacity()) return Optional.empty(); Instant firstFailing = job.firstFailing().get().end().get(); Instant lastCompleted = job.lastCompleted().get().end().get(); return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted) : Optional.of(lastCompleted.plus(Duration.ofMinutes(10)) .plus(Duration.between(firstFailing, lastCompleted) .dividedBy(2))) .filter(status.now::isBefore); } private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, InstanceName instance, JobType jobType) { ZoneId zone = ZoneId.from(step.environment(), step.region().get()); JobStatus job = status.instanceJobs(instance).get(jobType); Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(instance) .deployments().get(zone)); return new JobStepStatus(StepType.deployment, step, dependencies, job, status) { @Override public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return super.readyAt(change, Optional.of(job.id())) .filter(__ -> status.isTested(job.id(), change)); } /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */ @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { if ( change.isPinned() && change.platform().isPresent() && ! existingDeployment.map(Deployment::version).equals(change.platform())) return Optional.empty(); Change fullChange = status.application().require(instance).change(); if (existingDeployment.map(deployment -> ! 
(change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion())) && (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.applicationVersion()))) .orElse(false)) return job.lastCompleted().flatMap(Run::end); return job.lastSuccess() .filter(run -> change.platform().map(run.versions().targetPlatform()::equals).orElse(true) && change.application().map(run.versions().targetApplication()::equals).orElse(true)) .flatMap(Run::end); } }; } private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies, DeploymentStatus status, InstanceName instance, JobType testType, JobType prodType) { JobStatus job = status.instanceJobs(instance).get(testType); return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { Versions versions = Versions.from(change, status.application, status.deploymentFor(job.id()), status.systemVersion); return job.lastSuccess() .filter(run -> versions.targetsMatch(run.versions())) .filter(run -> ! 
status.jobs() .instance(instance) .type(prodType) .lastCompleted().endedNoLaterThan(run.start()) .isEmpty()) .map(run -> run.end().get()); } }; } private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, InstanceName instance, JobType jobType, boolean declared) { JobStatus job = status.instanceJobs(instance).get(jobType); return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return RunList.from(job) .matching(run -> run.versions().targetsMatch(Versions.from(change, status.application, dependent.flatMap(status::deploymentFor), status.systemVersion))) .status(RunStatus.success) .asList().stream() .map(run -> run.end().get()) .max(naturalOrder()); } @Override public boolean isDeclared() { return declared; } }; } } }
class DeploymentStatus { public static List<JobId> jobsFor(Application application, SystemName system) { if (DeploymentSpec.empty.equals(application.deploymentSpec())) return List.of(); return application.deploymentSpec().instances().stream() .flatMap(spec -> Stream.concat(Stream.of(systemTest, stagingTest), flatten(spec).filter(step -> step.concerns(prod)) .map(step -> { if (step instanceof DeclaredZone) return JobType.from(system, prod, ((DeclaredZone) step).region().get()); return JobType.testFrom(system, ((DeclaredTest) step).region()); }) .flatMap(Optional::stream)) .map(type -> new JobId(application.id().instance(spec.name()), type))) .collect(toUnmodifiableList()); } private static Stream<DeploymentSpec.Step> flatten(DeploymentSpec.Step step) { return step instanceof DeploymentSpec.Steps ? step.steps().stream().flatMap(DeploymentStatus::flatten) : Stream.of(step); } private static <T> List<T> union(List<T> first, List<T> second) { return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList()); } private final Application application; private final JobList allJobs; private final SystemName system; private final Version systemVersion; private final Instant now; private final Map<JobId, StepStatus> jobSteps; private final List<StepStatus> allSteps; public DeploymentStatus(Application application, Map<JobId, JobStatus> allJobs, SystemName system, Version systemVersion, Instant now) { this.application = requireNonNull(application); this.allJobs = JobList.from(allJobs.values()); this.system = requireNonNull(system); this.systemVersion = requireNonNull(systemVersion); this.now = requireNonNull(now); List<StepStatus> allSteps = new ArrayList<>(); this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps); this.allSteps = List.copyOf(allSteps); } /** The application this deployment status concerns. 
*/ public Application application() { return application; } /** A filterable list of the status of all jobs for this application. */ public JobList jobs() { return allJobs; } /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */ public boolean hasFailures() { return ! allJobs.failing() .not().withStatus(RunStatus.outOfCapacity) .isEmpty(); } /** All job statuses, by job type, for the given instance. */ public Map<JobType, JobStatus> instanceJobs(InstanceName instance) { return allJobs.asList().stream() .filter(job -> job.id().application().equals(application.id().instance(instance))) .collect(Collectors.toUnmodifiableMap(job -> job.id().type(), job -> job)); } /** Filterable job status lists for each instance of this application. */ public Map<ApplicationId, JobList> instanceJobs() { return allJobs.asList().stream() .collect(groupingBy(job -> job.id().application(), collectingAndThen(toUnmodifiableList(), JobList::from))); } /** * The set of jobs that need to run for the changes of each instance of the application to be considered complete, * and any test jobs for any oustanding change, which will likely be needed to lated deploy this change. */ public Map<JobId, List<Versions>> jobsToRun() { Map<InstanceName, Change> changes = new LinkedHashMap<>(); for (InstanceName instance : application.deploymentSpec().instanceNames()) changes.put(instance, application.require(instance).change()); Map<JobId, List<Versions>> jobs = jobsToRun(changes); for (InstanceName instance : application.deploymentSpec().instanceNames()) changes.put(instance, outstandingChange(instance).onTopOf(application.require(instance).change())); var testJobs = jobsToRun(changes, true).entrySet().stream() .filter(entry -> ! 
entry.getKey().type().isProduction()); return Stream.concat(jobs.entrySet().stream(), testJobs) .collect(collectingAndThen(toMap(Map.Entry::getKey, Map.Entry::getValue, DeploymentStatus::union, LinkedHashMap::new), ImmutableMap::copyOf)); } private Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) { Map<JobId, Versions> productionJobs = new LinkedHashMap<>(); changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests))); Map<JobId, List<Versions>> testJobs = testJobs(productionJobs); Map<JobId, List<Versions>> jobs = new LinkedHashMap<>(testJobs); productionJobs.forEach((job, versions) -> jobs.put(job, List.of(versions))); jobSteps.forEach((job, step) -> { if ( ! step.isDeclared() || jobs.containsKey(job)) return; Change change = changes.get(job.application().instance()); if (change == null || ! change.hasTargets()) return; Optional<JobId> firstProductionJobWithDeployment = jobSteps.keySet().stream() .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment()) .filter(jobId -> deploymentFor(jobId).isPresent()) .findFirst(); Versions versions = Versions.from(change, application, firstProductionJobWithDeployment.flatMap(this::deploymentFor), systemVersion); if (step.completedAt(change, firstProductionJobWithDeployment).isEmpty()) jobs.merge(job, List.of(versions), DeploymentStatus::union); }); return ImmutableMap.copyOf(jobs); } /** The set of jobs that need to run for the given changes to be considered complete. */ public Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes) { return jobsToRun(changes, false); } /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. 
*/ public Map<JobId, StepStatus> jobSteps() { return jobSteps; } public Map<InstanceName, StepStatus> instanceSteps() { ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder(); for (StepStatus status : allSteps) if (status instanceof InstanceStatus) instances.put(status.instance(), status); return instances.build(); } /** The step status for all relevant steps in the deployment spec of this, in the same order as in the deployment spec. */ public Optional<Deployment> deploymentFor(JobId job) { return Optional.ofNullable(application.require(job.application().instance()) .deployments().get(job.type().zone(system))); } /** * The change of this application's latest submission, if this upgrades any of its production deployments, * and has not yet started rolling out, due to some other change or a block window being present at the time of submission. */ public Change outstandingChange(InstanceName instance) { return application.latestVersion().map(Change::of) .filter(change -> application.require(instance).change().application().map(change::upgrades).orElse(true)) .filter(change -> ! jobsToRun(Map.of(instance, change)).isEmpty()) .orElse(Change.empty()); } /** * True if the job has already been triggered on the given versions, or if all test types (systemTest, stagingTest), * restricted to the job's instance if declared in that instance, have successful runs on the given versions. 
*/ public boolean isTested(JobId job, Change change) { Versions versions = Versions.from(change, application, deploymentFor(job), systemVersion); return allJobs.triggeredOn(versions).get(job).isPresent() || Stream.of(systemTest, stagingTest) .noneMatch(testType -> declaredTest(job.application(), testType).map(__ -> allJobs.instance(job.application().instance())) .orElse(allJobs) .type(testType) .successOn(versions).isEmpty()); } private Map<JobId, Versions> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) { ImmutableMap.Builder<JobId, Versions> jobs = ImmutableMap.builder(); jobSteps.forEach((job, step) -> { Optional<Deployment> deployment = deploymentFor(job) .map(existing -> assumeUpgradesSucceed ? new Deployment(existing.zone(), existing.applicationVersion(), change.platform().orElse(existing.version()), existing.at(), existing.metrics(), existing.activity(), existing.quota()) : existing); if ( job.application().instance().equals(instance) && job.type().isProduction() && step.completedAt(change).isEmpty()) jobs.put(job, Versions.from(change, application, deployment, systemVersion)); }); return jobs.build(); } /** The production jobs that need to run to complete roll-out of the given change to production. */ public Map<JobId, Versions> productionJobs(InstanceName instance, Change change) { return productionJobs(instance, change, false); } /** The test jobs that need to run prior to the given production deployment jobs. 
*/ public Map<JobId, List<Versions>> testJobs(Map<JobId, Versions> jobs) { Map<JobId, List<Versions>> testJobs = new LinkedHashMap<>(); for (JobType testType : List.of(systemTest, stagingTest)) { jobs.forEach((job, versions) -> { if (job.type().isProduction() && job.type().isDeployment()) { declaredTest(job.application(), testType).ifPresent(testJob -> { if (allJobs.successOn(versions).get(testJob).isEmpty()) testJobs.merge(testJob, List.of(versions), DeploymentStatus::union); }); } }); jobs.forEach((job, versions) -> { if ( job.type().isProduction() && job.type().isDeployment() && allJobs.successOn(versions).type(testType).isEmpty() && testJobs.keySet().stream() .noneMatch(test -> test.type() == testType && testJobs.get(test).contains(versions))) testJobs.merge(firstDeclaredOrElseImplicitTest(testType), List.of(versions), DeploymentStatus::union); }); } return ImmutableMap.copyOf(testJobs); } private JobId firstDeclaredOrElseImplicitTest(JobType testJob) { return application.deploymentSpec().instanceNames().stream() .map(name -> new JobId(application.id().instance(name), testJob)) .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow(); } /** JobId of any declared test of the given type, for the given instance. */ private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) { JobId jobId = new JobId(instanceId, testJob); return jobSteps.get(jobId).isDeclared() ? Optional.of(jobId) : Optional.empty(); } /** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. 
*/ private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps) { if (DeploymentSpec.empty.equals(spec)) return Map.of(); Map<JobId, StepStatus> dependencies = new LinkedHashMap<>(); List<StepStatus> previous = List.of(); for (DeploymentSpec.Step step : spec.steps()) previous = fillStep(dependencies, allSteps, step, previous, null); return ImmutableMap.copyOf(dependencies); } /** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */ private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step, List<StepStatus> previous, InstanceName instance) { if (step.steps().isEmpty()) { if (instance == null) return previous; if ( ! step.delay().isZero()) { StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance); allSteps.add(stepStatus); return List.of(stepStatus); } JobType jobType; StepStatus stepStatus; if (step.concerns(test) || step.concerns(staging)) { jobType = JobType.from(system, ((DeclaredZone) step).environment(), null) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, instance, jobType, true); previous = new ArrayList<>(previous); previous.add(stepStatus); } else if (step.isTest()) { jobType = JobType.testFrom(system, ((DeclaredTest) step).region()) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); JobType preType = JobType.from(system, prod, ((DeclaredTest) step).region()) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, instance, jobType, preType); previous = List.of(stepStatus); } else if 
(step.concerns(prod)) { jobType = JobType.from(system, ((DeclaredZone) step).environment(), ((DeclaredZone) step).region().get()) .orElseThrow(() -> new IllegalStateException(application + " specifies " + step + ", but this has no job in " + system)); stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, instance, jobType); previous = List.of(stepStatus); } else return previous; JobId jobId = new JobId(application.id().instance(instance), jobType); allSteps.removeIf(existing -> existing.job().equals(Optional.of(jobId))); allSteps.add(stepStatus); dependencies.put(jobId, stepStatus); return previous; } if (step instanceof DeploymentInstanceSpec) { DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step); StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this); instance = spec.name(); allSteps.add(instanceStatus); previous = List.of(instanceStatus); for (JobType test : List.of(systemTest, stagingTest)) { JobId job = new JobId(application.id().instance(instance), test); if ( ! dependencies.containsKey(job)) { var testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test.environment()), List.of(), this, job.application().instance(), test, false); dependencies.put(job, testStatus); allSteps.add(testStatus); } } } if (step.isOrdered()) { for (DeploymentSpec.Step nested : step.steps()) previous = fillStep(dependencies, allSteps, nested, previous, instance); return previous; } List<StepStatus> parallel = new ArrayList<>(); for (DeploymentSpec.Step nested : step.steps()) parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance)); return List.copyOf(parallel); } public enum StepType { /** An instance — completion marks a change as ready for the jobs contained in it. */ instance, /** A timed delay. */ delay, /** A system, staging or production test. */ test, /** A production deployment. 
*/ deployment, } /** * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change. * * Each node contains a step describing the node, * a list of steps which need to be complete before the step may start, * a list of jobs from which completion of the step is computed, and * optionally, an instance name used to identify a job type for the step, * * The completion criterion for each type of step is implemented in subclasses of this. */ public static abstract class StepStatus { private final StepType type; private final DeploymentSpec.Step step; private final List<StepStatus> dependencies; private final InstanceName instance; private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) { this.type = requireNonNull(type); this.step = requireNonNull(step); this.dependencies = List.copyOf(dependencies); this.instance = instance; } /** The type of step this is. */ public final StepType type() { return type; } /** The step defining this. */ public final DeploymentSpec.Step step() { return step; } /** The list of steps that need to be complete before this may start. */ public final List<StepStatus> dependencies() { return dependencies; } /** The instance of this. */ public final InstanceName instance() { return instance; } /** The id of the job this corresponds to, if any. */ public Optional<JobId> job() { return Optional.empty(); } /** The time at which this is, or was, complete on the given change and / or versions. */ public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); } /** The time at which this is, or was, complete on the given change and / or versions. */ abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent); /** The time at which this step is ready to run the specified change and / or versions. 
*/ public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); } /** The time at which this step is ready to run the specified change and / or versions. */ Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return dependenciesCompletedAt(change, dependent) .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change)) .flatMap(Optional::stream) .reduce(ready, maxBy(naturalOrder()))); } /** The time at which all dependencies completed on the given change and / or versions. */ Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) { return dependencies.stream().allMatch(step -> step.completedAt(change, dependent).isPresent()) ? dependencies.stream().map(step -> step.completedAt(change, dependent).get()) .max(naturalOrder()) .or(() -> Optional.of(Instant.EPOCH)) : Optional.empty(); } /** The time until which this step is blocked by a change blocker. */ public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); } /** The time until which this step is paused by user intervention. */ public Optional<Instant> pausedUntil() { return Optional.empty(); } /** The time until which this step is cooling down, due to consecutive failures. */ public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); } /** Whether this step is declared in the deployment spec, or is an implicit step. 
*/ public boolean isDeclared() { return true; } } private static class DelayStatus extends StepStatus { private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) { super(StepType.delay, step, dependencies, instance); } @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return readyAt(change, dependent).map(completion -> completion.plus(step().delay())); } } private static class InstanceStatus extends StepStatus { private final DeploymentInstanceSpec spec; private final Instant now; private final Instance instance; private final DeploymentStatus status; private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now, Instance instance, DeploymentStatus status) { super(StepType.instance, spec, dependencies, spec.name()); this.spec = spec; this.now = now; this.instance = instance; this.status = status; } /** * Time of completion of its dependencies, if all parts of the given change are contained in the change * for this instance, or if no more jobs should run for this instance for the given change. */ @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return ( (change.platform().isEmpty() || change.platform().equals(instance.change().platform())) && (change.application().isEmpty() || change.application().equals(instance.change().application())) || status.jobsToRun(Map.of(instance.name(), change)).isEmpty()) ? 
dependenciesCompletedAt(change, dependent) : Optional.empty(); } @Override public Optional<Instant> blockedUntil(Change change) { for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) { boolean blocked = false; for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) { while ( blocker.window().includes(current) && now.plus(Duration.ofDays(7)).isAfter(current) && ( change.platform().isPresent() && blocker.blocksVersions() || change.application().isPresent() && blocker.blocksRevisions())) { blocked = true; current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS); } } if ( ! blocked) return current == now ? Optional.empty() : Optional.of(current); } return Optional.of(now.plusSeconds(1 << 30)); } } private static abstract class JobStepStatus extends StepStatus { private final JobStatus job; private final DeploymentStatus status; private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job, DeploymentStatus status) { super(type, step, dependencies, job.id().application().instance()); this.job = requireNonNull(job); this.status = requireNonNull(status); } @Override public Optional<JobId> job() { return Optional.of(job.id()); } @Override public Optional<Instant> pausedUntil() { return status.application().require(job.id().application().instance()).jobPause(job.id().type()); } @Override public Optional<Instant> coolingDownUntil(Change change) { if (job.lastTriggered().isEmpty()) return Optional.empty(); if (job.lastCompleted().isEmpty()) return Optional.empty(); if (job.firstFailing().isEmpty()) return Optional.empty(); Versions lastVersions = job.lastCompleted().get().versions(); if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty(); if (change.application().isPresent() && ! 
change.application().get().equals(lastVersions.targetApplication())) return Optional.empty(); if (status.application.deploymentSpec().requireInstance(job.id().application().instance()).upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return Optional.empty(); if (job.id().type().environment().isTest() && job.isOutOfCapacity()) return Optional.empty(); Instant firstFailing = job.firstFailing().get().end().get(); Instant lastCompleted = job.lastCompleted().get().end().get(); return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted) : Optional.of(lastCompleted.plus(Duration.ofMinutes(10)) .plus(Duration.between(firstFailing, lastCompleted) .dividedBy(2))) .filter(status.now::isBefore); } private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, InstanceName instance, JobType jobType) { ZoneId zone = ZoneId.from(step.environment(), step.region().get()); JobStatus job = status.instanceJobs(instance).get(jobType); Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(instance) .deployments().get(zone)); return new JobStepStatus(StepType.deployment, step, dependencies, job, status) { @Override public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return super.readyAt(change, Optional.of(job.id())) .filter(__ -> status.isTested(job.id(), change)); } /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */ @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { if ( change.isPinned() && change.platform().isPresent() && ! existingDeployment.map(Deployment::version).equals(change.platform())) return Optional.empty(); Change fullChange = status.application().require(instance).change(); if (existingDeployment.map(deployment -> ! 
(change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion())) && (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.applicationVersion()))) .orElse(false)) return job.lastCompleted().flatMap(Run::end); return job.lastSuccess() .filter(run -> change.platform().map(run.versions().targetPlatform()::equals).orElse(true) && change.application().map(run.versions().targetApplication()::equals).orElse(true)) .flatMap(Run::end); } }; } private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies, DeploymentStatus status, InstanceName instance, JobType testType, JobType prodType) { JobStatus job = status.instanceJobs(instance).get(testType); return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { Versions versions = Versions.from(change, status.application, status.deploymentFor(job.id()), status.systemVersion); return job.lastSuccess() .filter(run -> versions.targetsMatch(run.versions())) .filter(run -> ! 
status.jobs() .instance(instance) .type(prodType) .lastCompleted().endedNoLaterThan(run.start()) .isEmpty()) .map(run -> run.end().get()); } }; } private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, InstanceName instance, JobType jobType, boolean declared) { JobStatus job = status.instanceJobs(instance).get(jobType); return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override public Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return RunList.from(job) .matching(run -> run.versions().targetsMatch(Versions.from(change, status.application, dependent.flatMap(status::deploymentFor), status.systemVersion))) .status(RunStatus.success) .asList().stream() .map(run -> run.end().get()) .max(naturalOrder()); } @Override public boolean isDeclared() { return declared; } }; } } }
I don't understand this ... Does this return upon encountering the newest run that was a success (is that what descendingMap gives us — newest first)? But wouldn't you then return that run's version? And why is it the oldest one?
/**
 * Returns the lowest target platform among the latest successful run of the
 * given job and every run after it, or delegates to the id-based overload
 * when the job has no successful run on record.
 */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
    // Scan runs newest-first (descendingMap of the run-number-keyed map),
    // keeping the lowest target platform seen so far.
    Version lowestSeen = null;
    for (Run run : job.runs().descendingMap().values()) {
        Version candidate = run.versions().targetPlatform();
        if (lowestSeen == null || candidate.isBefore(lowestSeen))
            lowestSeen = candidate;
        // Stop at the most recent successful run: the answer is the minimum
        // over that run and all newer ones — presumably because newer (failed)
        // runs may have partially installed their targets, so any of those
        // platforms could still be present; TODO confirm intent.
        if (run.status() == RunStatus.success)
            return Optional.of(lowestSeen);
    }
    // No successful run recorded for this job; fall back to the overload taking the job id.
    return oldestInstalledPlatform(job.id());
}
if (run.status() == RunStatus.success)
/**
 * Returns the oldest platform version targeted by any run since (and including)
 * the latest successful run of the given job, scanning runs newest-first;
 * falls back to the id-based overload when no run has succeeded.
 */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
    Version oldest = null;
    // descendingMap() iterates runs newest-first.
    for (Run run : job.runs().descendingMap().values()) {
        Version version = run.versions().targetPlatform();
        if (oldest == null || version.isBefore(oldest)) oldest = version;
        // The latest success bounds the scan: return the lowest target platform
        // seen among it and all newer runs — presumably because newer (failed)
        // runs may have partially installed their targets; TODO confirm.
        if (run.status() == RunStatus.success) return Optional.of(oldest);
    }
    // No successful run recorded for this job; consult the overload taking the job id.
    return oldestInstalledPlatform(job.id());
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificateManager endpointCertificateManager; private final StringFlag dockerImageRepoFlag; private final BooleanFlag provisionApplicationRoles; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, SecretStore secretStore, FlagSource flagSource, BillingController billingController) { this.controller = controller; this.curator = curator; this.accessControl = accessControl; this.configServer = controller.serviceRegistry().configServer(); this.clock = clock; this.artifactRepository = controller.serviceRegistry().artifactRepository(); this.applicationStore = controller.serviceRegistry().applicationStore(); this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource); this.billingController = billingController; deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore, controller.serviceRegistry().endpointCertificateProvider(), clock, flagSource); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : 
curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)) .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId)); } /** Enables reindexing for the given application in the given zone. 
*/ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. */ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all content clusters in all current deployments of the given application. 
*/ public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), job.application(), EnumSet.of(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) { if (id.instance().isTester()) throw new IllegalArgumentException("'" + id + "' is a tester application!"); InstanceId.validate(id.instance().value()); if (getInstance(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists"); if (getInstance(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists"); log.info("Created " + id); return application.withNewInstance(id.instance()); } public ActivateResult deploy(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options); } /** Deploys an application package for an existing application instance. 
*/ public ActivateResult deploy2(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Optional<ApplicationRoles> applicationRoles = Optional.empty(); Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || revision.compareTo(deployment.applicationVersion()) < 0 && ! 
(revision.isUnknown() && controller.system().isCd()))) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion())); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name())); endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone); if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) { try { applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id()); } catch (Exception e) { log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e); throw new RuntimeException("Unable to provision iam roles for application"); } } } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints, endpointCertificateMetadata, applicationRoles); var quotaUsage = deploymentQuotaUsage(zone, job.application()); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } private 
ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) { return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone) : applicationStore.get(application.tenant(), application.application(), revision)); } public ActivateResult deploy(ApplicationId instanceId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, Optional<ApplicationVersion> applicationVersionFromDeployer, DeployOptions options) { if (instanceId.instance().isTester()) throw new IllegalArgumentException("'" + instanceId + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId); if (getInstance(instanceId).isEmpty()) createInstance(instanceId); try (Lock deploymentLock = lockForDeployment(instanceId, zone)) { Version platformVersion; ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); InstanceName instance = instanceId.instance(); boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed(); boolean preferOldestVersion = options.deployCurrentVersion; if (manuallyDeployed) { applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown); applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package must be given when deploying to " + zone)); platformVersion = options.vespaVersion.map(Version::new) .orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(this::lastCompatibleVersion) .orElseGet(controller::readSystemVersion)); } else { JobType jobType = JobType.from(controller.system(), zone) .orElseThrow(() -> new IllegalArgumentException("No job is known for " 
+ zone + ".")); var run = controller.jobController().last(instanceId, jobType); if (run.map(Run::hasEnded).orElse(true)) return unexpectedDeployment(instanceId, zone); Versions versions = run.get().versions(); platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); applicationPackage = getApplicationPackage(instanceId, applicationVersion); applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType); validateRun(application.get().require(instance), zone, platformVersion, applicationVersion); } endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata( application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance)); endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone); } ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints, endpointCertificateMetadata, Optional.empty()); var quotaUsage = deploymentQuotaUsage(zone, instanceId); lockApplicationOrThrow(applicationId, application -> store(application.with(instanceId.instance(), instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) { if (applicationPackage.trustedCertificates().isEmpty()) return applicationPackage; Run run = controller.jobController().last(id, type) .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found")); if (run.testerCertificate().isEmpty()) return applicationPackage; return applicationPackage.withTrustedCertificate(run.testerCertificate().get()); } /** Fetches the 
requested application package from the artifact store(s). */ public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); } /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */ public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } for (InstanceName instance : declaredInstances) if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod)) application = controller.routing().assignRotations(application, instance); store(application); return application; } /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( 
artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty()); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. */ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty()); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, Optional<ApplicationRoles> applicationRoles) { try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(new DeploymentId(application, zone), clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, 
endpointCertificateMetadata, dockerImageRepo, domain, applicationRoles, deploymentQuota)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); } } private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = List.of(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of()); return new ActivateResult(new RevisionId("0"), prepareResponse, 0); } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); accessControl.deleteApplication(id, credentials); curator.removeApplication(id); controller.jobController().collectGarbage(); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. 
*/ public boolean isSuspended(DeploymentId deploymentId) { try { return configServer.isSuspended(deploymentId); } catch (ConfigServerException e) { if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND) return false; throw e; } } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. 
*/ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** Verify that we don't downgrade an existing production deployment. */ private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) { Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || applicationVersion.compareTo(deployment.applicationVersion()) < 0)) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", instance.id(), zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion())); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. 
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. */ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! 
((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { return controller.readVersionStatus().versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
/**
 * Manages the applications of this controller: creation, storage, deployment and deletion,
 * persisted through the curator DB. All mutations go through per-application locks.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For persistence */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificateManager endpointCertificateManager;
    private final StringFlag dockerImageRepoFlag;
    private final BooleanFlag provisionApplicationRoles;
    private final BillingController billingController;

    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          SecretStore secretStore, FlagSource flagSource, BillingController billingController) {
        this.controller = controller;
        this.curator = curator;
        this.accessControl = accessControl;
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = clock;
        this.artifactRepository = controller.serviceRegistry().artifactRepository();
        this.applicationStore = controller.serviceRegistry().applicationStore();
        this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource);
        this.billingController = billingController;
        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore,
                                                                    controller.serviceRegistry().endpointCertificateProvider(),
                                                                    clock, flagSource);

        // A minute after startup, rewrite all applications, creating any instance declared
        // in the deployment spec which is not yet present.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if (!application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++;
            }
            log.log(Level.INFO, String.format("Wrote %d applications in %s",
                                              count, Duration.between(start, clock.instant())));
        });
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }

    /** Returns the instance with the given id, or empty if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }

    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no documents types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes);
    }

    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId))
                           .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId));
    }

    /** Enables reindexing for the given application in the given zone.
*/
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }

    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return curator.readApplications(false);
    }

    /**
     * Returns a snapshot of all readable applications. Unlike {@link ApplicationController#asList()}, this tolerates
     * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
     * snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() {
        return curator.readApplications(true);
    }

    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() {
        return curator.readApplicationIds();
    }

    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return curator.readApplications(tenant);
    }

    public ArtifactRepository artifacts() { return artifactRepository; }

    public ApplicationStore applicationStore() { return applicationStore; }

    /** Returns all content clusters in all current deployments of the given application. */
    public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) {
        Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
        for (DeploymentId id : ids)
            clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
        return Collections.unmodifiableMap(clusters);
    }

    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  job.application(),
                                                  EnumSet.of(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty())
                           .min(naturalOrder());
    }

    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         .orElse(controller.readSystemVersion()); // no production nodes yet: assume system version
    }

    /**
     * Creates a new application for an existing tenant.
*
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Dashes and underscores are interchangeable in some backends, so also reject the twin name.
            if (getApplication(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            accessControl.createApplication(id, credentials);

            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }

    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
     */
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }

    /**
     * Returns the given application with the given instance added.
     *
     * @throws IllegalArgumentException if the instance name is a tester name, is invalid, or is already taken.
     */
    public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) {
        if (id.instance().isTester())
            throw new IllegalArgumentException("'" + id + "' is a tester application!");
        InstanceId.validate(id.instance().value());

        if (getInstance(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists");
        if (getInstance(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists");

        log.info("Created " + id);
        return application.withNewInstance(id.instance());
    }

    public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                                 Optional<ApplicationPackage> applicationPackageFromDeployer,
                                 DeployOptions options) {
        return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options);
    }

    /** Deploys an application package for an existing application instance.
*/
    public ActivateResult deploy2(JobId job, boolean deploySourceVersions) {
        if (job.application().instance().isTester())
            throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
        ZoneId zone = job.type().zone(controller.system());

        try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
            Set<ContainerEndpoint> endpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
            Optional<ApplicationRoles> applicationRoles = Optional.empty();

            // The deployment must belong to the currently active, unfinished run of this job.
            Run run = controller.jobController().last(job)
                                .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
            if (run.hasEnded())
                throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

            Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
            ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
            ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);

            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                Instance instance = application.get().require(job.application().instance());
                Deployment deployment = instance.deployments().get(zone);
                // Refuse version downgrades of production deployments, unless the change is pinned,
                // or the revision is unknown and this is a CD system.
                if (   zone.environment().isProduction() && deployment != null
                    && (   platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned()
                        || revision.compareTo(deployment.applicationVersion()) < 0 && ! (revision.isUnknown() && controller.system().isCd())))
                    throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                                     " are older than the currently deployed (platform: %s, application: %s).",
                                                                     job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));

                if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                    applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

                endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
                endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone);

                if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) {
                    try {
                        applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id());
                    } catch (Exception e) {
                        log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e);
                        // Fix: chain the original failure as the cause instead of discarding it.
                        throw new RuntimeException("Unable to provision iam roles for application", e);
                    }
                }
            } // Application lock is released here; the deployment below does not need it.

            ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints,
                                           endpointCertificateMetadata, applicationRoles);
            var quotaUsage = deploymentQuotaUsage(zone, job.application());
            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(job.application().instance(),
                                           instance -> instance.withNewDeployment(zone, revision, platform,
                                                                                  clock.instant(), warningsFrom(result), quotaUsage))));
            return result;
        }
    }

    /** Returns the quota usage of the given application in the given zone, computed from the node repository's view. */
    private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
        var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
        return DeploymentQuotaCalculator.calculateQuotaUsage(application);
    }

    private
ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
        // An unknown revision means a dev deployment: fetch the package the deployer uploaded for this zone.
        return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                           : applicationStore.get(application.tenant(), application.application(), revision));
    }

    /** Deploys the given instance to the given zone, resolving package and versions from deployer input or job state. */
    public ActivateResult deploy(ApplicationId instanceId, ZoneId zone,
                                 Optional<ApplicationPackage> applicationPackageFromDeployer,
                                 Optional<ApplicationVersion> applicationVersionFromDeployer,
                                 DeployOptions options) {
        if (instanceId.instance().isTester())
            throw new IllegalArgumentException("'" + instanceId + "' is a tester application!");
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId);
        if (getInstance(instanceId).isEmpty())
            createInstance(instanceId);

        try (Lock deploymentLock = lockForDeployment(instanceId, zone)) {
            Version platformVersion;
            ApplicationVersion applicationVersion;
            ApplicationPackage applicationPackage;
            Set<ContainerEndpoint> endpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;

            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                InstanceName instance = instanceId.instance();

                boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
                boolean preferOldestVersion = options.deployCurrentVersion;

                if (manuallyDeployed) {
                    // Manual deployments: the deployer supplies the package; versions come from options or fall back
                    // to the latest version compatible with the package's declared major, then the system version.
                    applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
                    applicationPackage = applicationPackageFromDeployer.orElseThrow(
                            () -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
                    platformVersion = options.vespaVersion.map(Version::new)
                                                          .orElse(applicationPackage.deploymentSpec().majorVersion()
                                                                                    .flatMap(this::lastCompatibleVersion)
                                                                                    .orElseGet(controller::readSystemVersion));
                } else {
                    // Job-driven deployments: versions and package come from the last (still running) job run.
                    JobType jobType = JobType.from(controller.system(), zone)
                                             .orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
                    var run = controller.jobController().last(instanceId, jobType);
                    if (run.map(Run::hasEnded).orElse(true))
                        return unexpectedDeployment(instanceId, zone);
                    Versions versions = run.get().versions();
                    platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform())
                                                          : versions.targetPlatform();
                    applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication())
                                                             : versions.targetApplication();

                    applicationPackage = getApplicationPackage(instanceId, applicationVersion);
                    applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType);
                    validateRun(application.get().require(instance), zone, platformVersion, applicationVersion);
                }

                endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(
                        application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance));

                endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone);
            } // Release application lock while doing the deployment below.

            ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints,
                                           endpointCertificateMetadata, Optional.empty());
            var quotaUsage = deploymentQuotaUsage(zone, instanceId);
            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(instanceId.instance(),
                                           instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion,
                                                                                  clock.instant(), warningsFrom(result), quotaUsage))));
            return result;
        }
    }

    /** Adds the tester certificate of the last run of the given job to the package, if both are present. */
    private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) {
        if (applicationPackage.trustedCertificates().isEmpty())
            return applicationPackage;

        Run run = controller.jobController().last(id, type)
                            .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found"));
        if (run.testerCertificate().isEmpty())
            return applicationPackage;

        return applicationPackage.withTrustedCertificate(run.testerCertificate().get());
    }

    /** Fetches the
requested application package from the artifact store(s). */
    public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
        return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
    }

    /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
    public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
        applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());

        application = application.with(applicationPackage.deploymentSpec());
        application = application.with(applicationPackage.validationOverrides());

        var existingInstances = application.get().instances().keySet();
        var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
        // Create any instance declared in the spec but not yet present.
        for (var name : declaredInstances)
            if ( ! existingInstances.contains(name))
                application = withNewInstance(application, application.get().id().instance(name));

        // Remove deployments (and possibly whole instances) no longer covered by the spec.
        for (InstanceName name : existingInstances) {
            application = withoutDeletedDeployments(application, name);
        }

        // (Re)assign global rotations to each instance which deploys to production.
        for (InstanceName instance : declaredInstances)
            if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
                application = controller.routing().assignRotations(application, instance);

        store(application);
        return application;
    }

    /** Deploy a system application to given zone */
    public void deploy(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            deploySystemApplicationPackage(application, zone, version);
        } else {
            // Upgrade by node-repository orchestration instead of package deployment.
            configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
        }
    }

    /** Deploy a system application to given zone */
    public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            ApplicationPackage applicationPackage = new ApplicationPackage(
                    artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
            );
            return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
        } else {
            throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
        }
    }

    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
    }

    /** Common deployment path: prepares and activates the given package on the config server of the given zone. */
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                                  ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  Optional<ApplicationRoles> applicationRoles) {
        try {
            // Optional override of the docker image repository, controlled by feature flag per zone and application.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);

            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant-> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant)tenant).domain());

            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                     clock.instant(),
                                                                     applicationPackage.metaDataZip());

            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                                                                        asList(application.tenant()), application, zone,
                                                                        applicationPackage.deploymentSpec());
            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints,
endpointCertificateMetadata, dockerImageRepo, domain,
                                                           applicationRoles, deploymentQuota));
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Always refresh routing policies, also on failure, so they reflect what the config server saw.
            controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
        }
    }

    /** Returns a synthetic warning result for a deployment request no job currently expects. */
    private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone + " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = List.of(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of());
        return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
    }

    /**
     * Removes production deployments of the given instance which are no longer declared in the deployment spec,
     * requiring a deployment-removal validation override; removes the instance itself if it then has no deployments
     * and is not declared.
     */
    private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
        DeploymentSpec deploymentSpec = application.get().deploymentSpec();
        List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                      .map(Deployment::zone)
                                                      .filter(zone ->      deploymentSpec.instance(instance).isEmpty()
                                                                      || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region()))
                                                      .collect(toList());

        if (deploymentsToRemove.isEmpty())
            return application;

        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " +
                                               application.get().require(instance) + " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(zone -> zone.region().value())
                                                                  .collect(joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " +
                                               ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));

        boolean removeInstance =    ! deploymentSpec.instanceNames().contains(instance)
                                 && application.get().require(instance).deployments().size() == deploymentsToRemove.size();
        for (ZoneId zone : deploymentsToRemove)
            application = deactivate(application, instance, zone);
        if (removeInstance)
            application = application.without(instance);
        return application;
    }

    /**
     * Deletes the given application. All known instances of the application will be deleted.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
        lockApplicationOrThrow(id, application -> {
            var deployments = application.get().instances().values().stream()
                                         .filter(instance -> ! instance.deployments().isEmpty())
                                         .collect(toMap(instance -> instance.name(),
                                                        instance -> instance.deployments().keySet().stream()
                                                                            .map(ZoneId::toString)
                                                                            .collect(joining(", "))));
            if ( ! deployments.isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);

            for (Instance instance : application.get().instances().values()) {
                controller.routing().removeEndpointsInDns(application.get(), instance.name());
                application = application.without(instance.name());
            }

            applicationStore.removeAll(id.tenant(), id.application());
            applicationStore.removeAllTesters(id.tenant(), id.application());
            applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());

            accessControl.deleteApplication(id, credentials);
            curator.removeApplication(id);

            controller.jobController().collectGarbage();
            log.info("Deleted " + id);
        });
    }

    /**
     * Deletes the given application instance.
*
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the instance does not exist
     */
    public void deleteInstance(ApplicationId instanceId) {
        if (getInstance(instanceId).isEmpty())
            throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");

        lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
            if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                                   application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                              .sorted().collect(joining(", ")));

            if (   ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
                &&   application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
                throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");

            controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
            curator.writeApplication(application.without(instanceId.instance()).get());
            controller.jobController().collectGarbage();
            log.info("Deleted " + instanceId);
        });
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        curator.writeApplication(application.get());
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(requireApplication(applicationId), lock));
        }
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param restartFilter Variables to filter which nodes to restart.
     */
    public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
        configServer.restart(deploymentId, restartFilter);
    }

    /**
     * Asks the config server whether this deployment is currently <i>suspended</i>:
     * Not in a state where it should receive traffic.
*/
    public boolean isSuspended(DeploymentId deploymentId) {
        try {
            return configServer.isSuspended(deploymentId);
        } catch (ConfigServerException e) {
            // An unknown deployment is simply reported as not suspended.
            if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
                return false;
            throw e;
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(ApplicationId id, ZoneId zone) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id),
                               application -> store(deactivate(application, id.instance(), zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
        DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
        try {
            configServer.deactivate(id);
        } catch (NotFoundException ignored) {
            // Deployment already gone on the config server; treat as deactivated.
        } finally {
            controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
            if (zone.environment().isManuallyDeployed())
                applicationStore.putMetaTombstone(id, clock.instant());
        }
        return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the id with every dash in the application name replaced by an underscore. */
    private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
        return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
    }

    private ApplicationId dashToUnderscore(ApplicationId id) {
        return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
    }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(TenantAndApplicationId application) {
        return curator.lock(application);
    }

    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        return curator.lockForDeployment(application, zone);
    }

    /** Verify that we don't downgrade an existing production deployment. */
    private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
        Deployment deployment = instance.deployments().get(zone);
        if (   zone.environment().isProduction() && deployment != null
            && (   platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned()
                || applicationVersion.compareTo(deployment.applicationVersion()) < 0))
            throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                             " are older than the currently deployed (platform: %s, application: %s).",
                                                             instance.id(), zone, platformVersion, applicationVersion,
                                                             deployment.version(), deployment.applicationVersion()));
    }

    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3.
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
     *
     * @param tenantName tenant where application should be deployed
     * @param applicationPackage application package
     * @param deployer principal initiating the deployment, possibly empty
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName,
                                                       Optional<ZoneId> zoneId, ApplicationPackage applicationPackage,
                                                       Optional<Principal> deployer) {
        Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                                  .map(domain -> new AthenzDomain(domain.value()));
        if(identityDomain.isEmpty()) {
            // No athenz domain configured: nothing to verify.
            return;
        }
        if(! (accessControl instanceof AthenzFacade)) {
            throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
        }
        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());

        Optional<AthenzUser> athenzUser = getUser(deployer);
        if (athenzUser.isPresent()) {
            // A user deploys: require that the user can launch the service, or is tenant domain admin.
            var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
            var serviceToLaunch = instanceName
                    .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> applicationPackage.deploymentSpec().athenzService())
                    .map(service -> new AthenzService(identityDomain.get(), service.value()));

            if(serviceToLaunch.isPresent()) {
                if (
                        ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
                        ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
                ) {
                    throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                       "service " + serviceToLaunch.get().getFullName() + ". " +
                                                       "Please reach out to the domain admin.");
                }
            } else {
                throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
            }
        } else {
            // Not a user: require that the tenant's Athenz domain matches the one in deployment.xml.
            Tenant tenant = controller.tenants().require(tenantName);
            AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
            if ( ! Objects.equals(tenantDomain, identityDomain.get()))
                throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                                   "must match tenant domain: [" + tenantDomain.getName() + "]");
        }
    }

    /** Returns the AthenzUser of this principal, or empty if this does not represent a user. */
    private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
        return deployer
                .filter(AthenzPrincipal.class::isInstance)
                .map(AthenzPrincipal.class::cast)
                .map(AthenzPrincipal::getIdentity)
                .filter(AthenzUser.class::isInstance)
                .map(AthenzUser.class::cast);
    }

    /** Verifies that the configured athenz service (if any) can be launched by each reachable config server. */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(domain -> {
            controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
                AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
                deploymentSpec.athenzService().ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
                deploymentSpec.instances().forEach(spec -> {
                    spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                    });
                });
            });
        });
    }

    /** Throws unless the given identity is allowed to launch the given Athenz service. */
    private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
        if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
            throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
    }

    /** Returns the latest known version within the given major. */
    public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
        return controller.readVersionStatus().versions().stream()
                         .map(VespaVersion::versionNumber)
                         .filter(version -> version.getMajor() == targetMajorVersion)
                         .max(naturalOrder());
    }

    /** Extract deployment warnings metric from deployment result */
    private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
        if (result.prepareResponse().log == null) return Map.of();
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        for (Log log : result.prepareResponse().log) {
            // Count only warning-level entries.
            if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
            warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
        }
        return Map.copyOf(warnings);
    }

}
We assume the newest successful run converged, i.e., all nodes reached its target platform version. That run, and every run after it (all non-successes, since it is the newest success), may each have left at least one node on its own target version, so those are the versions to consider. The oldest of them — the lowest version number — is the answer.
/**
 * Returns the oldest platform version any node in this job's zone may currently have installed,
 * judging from run history alone.
 *
 * Walks the runs from newest to oldest, tracking the lowest target platform seen. The newest
 * successful run is assumed to have converged (all nodes on its target), so once it is reached,
 * the lowest target among it and the later (non-successful) runs bounds the oldest installed
 * platform, and we can answer from history. With no successful run on record, history is
 * inconclusive and we fall back to querying the zone's node repository.
 *
 * @param job the job whose run history to inspect
 * @return the lowest candidate platform version, empty only if the node-repo fallback finds none
 */
private Optional<Version> oldestInstalledPlatform(JobStatus job) {
    Version lowestSeen = null;
    for (Run run : job.runs().descendingMap().values()) {
        Version target = run.versions().targetPlatform();
        if (lowestSeen == null || target.isBefore(lowestSeen))
            lowestSeen = target;
        // Newest success reached: all nodes converged on some run at or after this one,
        // so the lowest target seen so far is the answer.
        if (run.status() == RunStatus.success)
            return Optional.of(lowestSeen);
    }
    // No successful run recorded — ask the zone's node repository instead.
    return oldestInstalledPlatform(job.id());
}
if (run.status() == RunStatus.success)
/** Returns the lowest target platform among the newest successful run and all runs after it (the oldest version any node may still have), falling back to the zone's node repository when no run has succeeded. */ private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; /* newest success reached: nodes converged no earlier than the lowest target seen */ if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificateManager endpointCertificateManager; private final StringFlag dockerImageRepoFlag; private final BooleanFlag provisionApplicationRoles; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, SecretStore secretStore, FlagSource flagSource, BillingController billingController) { this.controller = controller; this.curator = curator; this.accessControl = accessControl; this.configServer = controller.serviceRegistry().configServer(); this.clock = clock; this.artifactRepository = controller.serviceRegistry().artifactRepository(); this.applicationStore = controller.serviceRegistry().applicationStore(); this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource); this.billingController = billingController; deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore, controller.serviceRegistry().endpointCertificateProvider(), clock, flagSource); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : 
curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)) .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId)); } /** Enables reindexing for the given application in the given zone. 
*/ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. */ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all content clusters in all current deployments of the given application. 
*/ public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), job.application(), EnumSet.of(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) { if (id.instance().isTester()) throw new IllegalArgumentException("'" + id + "' is a tester application!"); InstanceId.validate(id.instance().value()); if (getInstance(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists"); if (getInstance(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists"); log.info("Created " + id); return application.withNewInstance(id.instance()); } public ActivateResult deploy(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options); } /** Deploys an application package for an existing application instance. 
*/ public ActivateResult deploy2(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Optional<ApplicationRoles> applicationRoles = Optional.empty(); Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || revision.compareTo(deployment.applicationVersion()) < 0 && ! 
(revision.isUnknown() && controller.system().isCd()))) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion())); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name())); endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone); if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) { try { applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id()); } catch (Exception e) { log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e); throw new RuntimeException("Unable to provision iam roles for application"); } } } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints, endpointCertificateMetadata, applicationRoles); var quotaUsage = deploymentQuotaUsage(zone, job.application()); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } private 
ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) { return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone) : applicationStore.get(application.tenant(), application.application(), revision)); } public ActivateResult deploy(ApplicationId instanceId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, Optional<ApplicationVersion> applicationVersionFromDeployer, DeployOptions options) { if (instanceId.instance().isTester()) throw new IllegalArgumentException("'" + instanceId + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId); if (getInstance(instanceId).isEmpty()) createInstance(instanceId); try (Lock deploymentLock = lockForDeployment(instanceId, zone)) { Version platformVersion; ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); InstanceName instance = instanceId.instance(); boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed(); boolean preferOldestVersion = options.deployCurrentVersion; if (manuallyDeployed) { applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown); applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package must be given when deploying to " + zone)); platformVersion = options.vespaVersion.map(Version::new) .orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(this::lastCompatibleVersion) .orElseGet(controller::readSystemVersion)); } else { JobType jobType = JobType.from(controller.system(), zone) .orElseThrow(() -> new IllegalArgumentException("No job is known for " 
+ zone + ".")); var run = controller.jobController().last(instanceId, jobType); if (run.map(Run::hasEnded).orElse(true)) return unexpectedDeployment(instanceId, zone); Versions versions = run.get().versions(); platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); applicationPackage = getApplicationPackage(instanceId, applicationVersion); applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType); validateRun(application.get().require(instance), zone, platformVersion, applicationVersion); } endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata( application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance)); endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone); } ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints, endpointCertificateMetadata, Optional.empty()); var quotaUsage = deploymentQuotaUsage(zone, instanceId); lockApplicationOrThrow(applicationId, application -> store(application.with(instanceId.instance(), instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) { if (applicationPackage.trustedCertificates().isEmpty()) return applicationPackage; Run run = controller.jobController().last(id, type) .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found")); if (run.testerCertificate().isEmpty()) return applicationPackage; return applicationPackage.withTrustedCertificate(run.testerCertificate().get()); } /** Fetches the 
requested application package from the artifact store(s). */ public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); } /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */ public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } for (InstanceName instance : declaredInstances) if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod)) application = controller.routing().assignRotations(application, instance); store(application); return application; } /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( 
artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty()); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. */ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty()); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, Optional<ApplicationRoles> applicationRoles) { try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(new DeploymentId(application, zone), clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, 
endpointCertificateMetadata, dockerImageRepo, domain, applicationRoles, deploymentQuota)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); } } private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = List.of(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of()); return new ActivateResult(new RevisionId("0"), prepareResponse, 0); } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); accessControl.deleteApplication(id, credentials); curator.removeApplication(id); controller.jobController().collectGarbage(); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
    }
}

/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        action.accept(new LockedApplication(requireApplication(applicationId), lock));
    }
}

/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param restartFilter Variables to filter which nodes to restart.
 */
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
    configServer.restart(deploymentId, restartFilter);
}

/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    try {
        return configServer.isSuspended(deploymentId);
    }
    catch (ConfigServerException e) {
        // A deployment the config server does not know about is treated as not suspended.
        if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
            return false;
        throw e;
    }
}

/** Deactivate application in the given zone */
public void deactivate(ApplicationId id, ZoneId zone) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id),
                           application -> store(deactivate(application, id.instance(), zone)));
}

/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
    DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
    try {
        configServer.deactivate(id);
    }
    catch (NotFoundException ignored) {
        // Deployment is already gone on the config server; proceed to clean up our own state below.
    }
    finally {
        // Always refresh routing policies and (for manual zones) tombstone the metadata,
        // whether or not the config server call succeeded.
        controller.routing().policies().refresh(application.get().id().instance(instanceName),
                                                application.get().deploymentSpec(), zone);
        if (zone.environment().isManuallyDeployed())
            applicationStore.putMetaTombstone(id, clock.instant());
    }
    return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}

/** Returns the deployment trigger owned by this. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

/** Returns the id with '-' replaced by '_' in the application name; used to detect names that collide under that mapping. */
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
    return TenantAndApplicationId.from(id.tenant().value(),
                                       id.application().value().replaceAll("-", "_"));
}

private ApplicationId dashToUnderscore(ApplicationId id) {
    return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}

/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(TenantAndApplicationId application) {
    return curator.lock(application);
}

/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
    return curator.lockForDeployment(application, zone);
}

/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
    Deployment deployment = instance.deployments().get(zone);
    // Only guard production zones; a pinned change may deliberately hold an older platform.
    if (   zone.environment().isProduction()
        && deployment != null
        && (   platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned()
            || applicationVersion.compareTo(deployment.applicationVersion()) < 0))
        throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                         " are older than the currently deployed (platform: %s, application: %s).",
                                                         instance.id(), zone, platformVersion, applicationVersion,
                                                         deployment.version(), deployment.applicationVersion()));
}

/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. Verify that the Athenz service can be launched by the config server
 * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 3.
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName tenant where application should be deployed
 * @param applicationPackage application package
 * @param deployer principal initiating the deployment, possibly empty
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName,
                                                   Optional<ZoneId> zoneId, ApplicationPackage applicationPackage,
                                                   Optional<Principal> deployer) {
    Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                              .map(domain -> new AthenzDomain(domain.value()));
    if (identityDomain.isEmpty()) {
        // Nothing to verify when no Athenz domain is declared in deployment.xml.
        return;
    }
    if ( ! (accessControl instanceof AthenzFacade)) {
        throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
    }
    verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());

    Optional<AthenzUser> athenzUser = getUser(deployer);
    if (athenzUser.isPresent()) {
        var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
        // Instance-level athenz-service takes precedence over the application-level one.
        var serviceToLaunch = instanceName
                .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                .or(() -> applicationPackage.deploymentSpec().athenzService())
                .map(service -> new AthenzService(identityDomain.get(), service.value()));

        if (serviceToLaunch.isPresent()) {
            // The user must either be allowed to launch the service or be a tenant-domain admin.
            if (   ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get())
                && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())) {
                throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                   "service " + serviceToLaunch.get().getFullName() + ". " +
                                                   "Please reach out to the domain admin.");
            }
        } else {
            throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
        }
    } else {
        // The principal is not a user: require that the tenant's Athenz domain matches the one in deployment.xml.
        Tenant tenant = controller.tenants().require(tenantName);
        AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
        if ( ! Objects.equals(tenantDomain, identityDomain.get()))
            throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                               "must match tenant domain: [" + tenantDomain.getName() + "]");
    }
}

/*
 * Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
 */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    return deployer
            .filter(AthenzPrincipal.class::isInstance)
            .map(AthenzPrincipal.class::cast)
            .map(AthenzPrincipal::getIdentity)
            .filter(AthenzUser.class::isInstance)
            .map(AthenzUser.class::cast);
}

/*
 * Verifies that the configured athenz service (if any) can be launched
 * by the config server identity of every reachable zone.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
    deploymentSpec.athenzDomain().ifPresent(domain -> {
        controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
            AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
            // Application-level service.
            deploymentSpec.athenzService().ifPresent(service -> {
                verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
            });
            // Instance-level services, which may differ per environment/region.
            deploymentSpec.instances().forEach(spec -> {
                spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
            });
        });
    });
}

private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
    if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
        throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}

/** Returns the latest known version within the given major. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    return controller.readVersionStatus().versions().stream()
                     .map(VespaVersion::versionNumber)
                     .filter(version -> version.getMajor() == targetMajorVersion)
                     .max(naturalOrder());
}

/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    if (result.prepareResponse().log == null) return Map.of();
    Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
    for (Log log : result.prepareResponse().log) {
        // Count only warning-level log entries; all are folded into the 'all' bucket.
        if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
        warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
    }
    return Map.copyOf(warnings);
}

}
/**
 * Manages the applications known to this controller: creation, deletion, locking,
 * deployment, and the bookkeeping around these operations.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For persistence */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificateManager endpointCertificateManager;
    private final StringFlag dockerImageRepoFlag;
    private final BooleanFlag provisionApplicationRoles;
    private final BillingController billingController;

    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          SecretStore secretStore, FlagSource flagSource, BillingController billingController) {
        this.controller = controller;
        this.curator = curator;
        this.accessControl = accessControl;
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = clock;
        this.artifactRepository = controller.serviceRegistry().artifactRepository();
        this.applicationStore = controller.serviceRegistry().applicationStore();
        this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource);
        this.billingController = billingController;

        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore,
                                                                    controller.serviceRegistry().endpointCertificateProvider(), clock, flagSource);

        // Rewrite all stored applications, creating any instances declared in deployment.xml
        // that are missing from storage. Presumably runs once, one minute after construction — see Once.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if (!application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++; // counts every id iterated, even if the application was not present when locked
            }
            log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant())));
        });
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }

    /** Returns the instance with the given id, or empty if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }

    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no documents types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes);
    }

    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId))
                           .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId));
    }

    /** Enables reindexing for the given application in the given zone.
*/
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }

    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return curator.readApplications(false);
    }

    /**
     * Returns a snapshot of all readable applications. Unlike {@link #asList()}, this skips
     * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
     * snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() {
        return curator.readApplications(true);
    }

    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() {
        return curator.readApplicationIds();
    }

    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return curator.readApplications(tenant);
    }

    /** Returns the artifact repository used by this. */
    public ArtifactRepository artifacts() { return artifactRepository; }

    /** Returns the application store used by this. */
    public ApplicationStore applicationStore() { return applicationStore; }

    /** Returns all content clusters in all current deployments of the given deployments, keyed by zone. */
    public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) {
        Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
        for (DeploymentId id : ids)
            clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
        return Collections.unmodifiableMap(clusters);
    }

    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  job.application(),
                                                  EnumSet.of(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty())
                           .min(naturalOrder());
    }

    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         .orElse(controller.readSystemVersion()); // no production nodes: fall back to the system version
    }

    /**
     * Creates a new application for an existing tenant.
*
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Reject names that collide with an existing application when '-' is mapped to '_'.
            if (getApplication(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");

            accessControl.createApplication(id, credentials);

            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }

    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
     */
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }

    /** Returns the given application with a new instance added; validates the instance name and uniqueness. */
    public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) {
        if (id.instance().isTester())
            throw new IllegalArgumentException("'" + id + "' is a tester application!");
        InstanceId.validate(id.instance().value());

        if (getInstance(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists");
        // Reject names that collide with an existing instance when '-' is mapped to '_'.
        if (getInstance(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists");

        log.info("Created " + id);
        return application.withNewInstance(id.instance());
    }

    /** Deploys with no specific application version; see the five-argument overload. */
    public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                                 Optional<ApplicationPackage> applicationPackageFromDeployer,
                                 DeployOptions options) {
        return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options);
    }

    /** Deploys an application package for an existing application instance.
*/
    public ActivateResult deploy2(JobId job, boolean deploySourceVersions) {
        if (job.application().instance().isTester())
            throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");

        TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
        ZoneId zone = job.type().zone(controller.system());

        // The deployment lock is held for the whole operation to serialize deployments to this zone.
        try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
            Set<ContainerEndpoint> endpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
            Optional<ApplicationRoles> applicationRoles = Optional.empty();

            Run run = controller.jobController().last(job)
                                .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));

            if (run.hasEnded())
                throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

            // Source versions are used when deploySourceVersions is set and the run has them.
            Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
            ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
            ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);

            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                Instance instance = application.get().require(job.application().instance());

                Deployment deployment = instance.deployments().get(zone);
                // Reject downgrades of production deployments, unless the change is pinned,
                // or the revision is unknown in a CD system.
                if (   zone.environment().isProduction() && deployment != null
                    && (   platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned()
                        || revision.compareTo(deployment.applicationVersion()) < 0 && ! (revision.isUnknown() && controller.system().isCd())))
                    throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                                     " are older than the currently deployed (platform: %s, application: %s).",
                                                                     job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));

                if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                    applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

                endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));

                endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone);

                if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) {
                    try {
                        applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id());
                    } catch (Exception e) {
                        log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e);
                        // Chain the cause so the failure is diagnosable from the thrown exception alone.
                        throw new RuntimeException("Unable to provision iam roles for application", e);
                    }
                }
            } // Release application lock while doing the deployment, which is slow.

            ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints,
                                           endpointCertificateMetadata, applicationRoles);

            var quotaUsage = deploymentQuotaUsage(zone, job.application());

            // Record the deployment; it is recorded with the new version only after activation succeeds.
            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(job.application().instance(),
                                           instance -> instance.withNewDeployment(zone, revision, platform,
                                                                                  clock.instant(), warningsFrom(result),
                                                                                  quotaUsage))));
            return result;
        }
    }

    /** Returns the quota usage of the given application in the given zone, read from the node repository. */
    private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
        var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
        return DeploymentQuotaCalculator.calculateQuotaUsage(application);
    }

    /** Fetches the package for the given revision: the dev package for unknown revisions, the stored one otherwise. */
    private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
        return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                           : applicationStore.get(application.tenant(), application.application(), revision));
    }

    /** Deploys the given package, or the one from the last triggered job, to the given zone. */
    public ActivateResult deploy(ApplicationId instanceId, ZoneId zone,
                                 Optional<ApplicationPackage> applicationPackageFromDeployer,
                                 Optional<ApplicationVersion> applicationVersionFromDeployer,
                                 DeployOptions options) {
        if (instanceId.instance().isTester())
            throw new IllegalArgumentException("'" + instanceId + "' is a tester application!");

        TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId);
        if (getInstance(instanceId).isEmpty())
            createInstance(instanceId);

        try (Lock deploymentLock = lockForDeployment(instanceId, zone)) {
            Version platformVersion;
            ApplicationVersion applicationVersion;
            ApplicationPackage applicationPackage;
            Set<ContainerEndpoint> endpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;

            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                InstanceName instance = instanceId.instance();

                boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
                boolean preferOldestVersion = options.deployCurrentVersion;

                if (manuallyDeployed) {
                    // Manual deployments use the package handed in by the deployer.
                    applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
                    applicationPackage = applicationPackageFromDeployer.orElseThrow(
                            () -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
                    platformVersion = options.vespaVersion.map(Version::new)
                                                          .orElse(applicationPackage.deploymentSpec().majorVersion()
                                                                                    .flatMap(this::lastCompatibleVersion)
                                                                                    .orElseGet(controller::readSystemVersion));
                } else {
                    // Pipeline-triggered deployments use the versions of the last run of the matching job.
                    JobType jobType = JobType.from(controller.system(), zone)
                                             .orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
                    var run = controller.jobController().last(instanceId, jobType);
                    if (run.map(Run::hasEnded).orElse(true))
                        return unexpectedDeployment(instanceId, zone);
                    Versions versions = run.get().versions();
                    platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform())
                                                          : versions.targetPlatform();
                    applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication())
                                                             : versions.targetApplication();

                    applicationPackage = getApplicationPackage(instanceId, applicationVersion);
                    applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType);
                    validateRun(application.get().require(instance), zone, platformVersion, applicationVersion);
                }

                endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(
                        application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance));

                endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone);
            } // Release application lock while doing the deployment, which is slow.

            ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints,
                                           endpointCertificateMetadata, Optional.empty());

            var quotaUsage = deploymentQuotaUsage(zone, instanceId);

            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(instanceId.instance(),
                                           instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion,
                                                                                  clock.instant(), warningsFrom(result),
                                                                                  quotaUsage))));
            return result;
        }
    }

    /** Adds the tester certificate of the last run of the given job to the package, when both are present. */
    private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) {
        if (applicationPackage.trustedCertificates().isEmpty())
            return applicationPackage;

        Run run = controller.jobController().last(id, type)
                            .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found"));
        if (run.testerCertificate().isEmpty())
            return applicationPackage;

        return applicationPackage.withTrustedCertificate(run.testerCertificate().get());
    }

    /** Fetches the
requested application package from the artifact store(s). */
    public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
        return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
    }

    /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
    public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
        applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());

        application = application.with(applicationPackage.deploymentSpec());
        application = application.with(applicationPackage.validationOverrides());

        var existingInstances = application.get().instances().keySet();
        var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
        // Create any declared instance which doesn't exist yet.
        for (var name : declaredInstances)
            if ( ! existingInstances.contains(name))
                application = withNewInstance(application, application.get().id().instance(name));

        // Remove deployments no longer present in deployment.xml, for all existing instances.
        for (InstanceName name : existingInstances) {
            application = withoutDeletedDeployments(application, name);
        }

        // Assign rotations to instances which are deployed to production.
        for (InstanceName instance : declaredInstances)
            if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
                application = controller.routing().assignRotations(application, instance);

        store(application);
        return application;
    }

    /** Deploy a system application to given zone */
    public void deploy(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            deploySystemApplicationPackage(application, zone, version);
        } else {
            // Upgrade by telling the node repository to upgrade the nodes of this application type.
            configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
        }
    }

    /** Deploy a system application to given zone */
    public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            ApplicationPackage applicationPackage = new ApplicationPackage(
                    artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
            );
            return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
        } else {
            throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
        }
    }

    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
    }

    /** Prepares and activates the given package on the config server, and refreshes routing policies afterwards. */
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone,
                                  Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  Optional<ApplicationRoles> applicationRoles) {
        try {
            // Per-zone/per-application docker registry override, controlled by feature flag; blank means no override.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);

            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant -> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant) tenant).domain());

            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                     clock.instant(),
                                                                     applicationPackage.metaDataZip());

            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                                                                        asList(application.tenant()), application, zone,
                                                                        applicationPackage.deploymentSpec());

            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                           endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                           applicationRoles, deploymentQuota));
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Routing policies are refreshed even when deployment fails.
            controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
        }
    }

    /** Returns a no-op ActivateResult carrying a warning, for deployments which were not expected. */
    private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = List.of(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of());
        return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
    }

    /** Removes production deployments no longer declared in deployment.xml; requires a validation override. */
    private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
        DeploymentSpec deploymentSpec = application.get().deploymentSpec();
        List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                      .map(Deployment::zone)
                                                      .filter(zone ->      deploymentSpec.instance(instance).isEmpty()
                                                                      || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region()))
                                                      .collect(toList());

        if (deploymentsToRemove.isEmpty())
            return application;

        // Removing a production deployment is destructive; require an explicit validation override.
        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(zone -> zone.region().value())
                                                                  .collect(joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " +
                                               ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));

        // Remove the instance as well when it is undeclared and all of its deployments are being removed.
        boolean removeInstance =    ! deploymentSpec.instanceNames().contains(instance)
                                 &&   application.get().require(instance).deployments().size() == deploymentsToRemove.size();
        for (ZoneId zone : deploymentsToRemove)
            application = deactivate(application, instance, zone);
        if (removeInstance)
            application = application.without(instance);
        return application;
    }

    /**
     * Deletes the given application. All known instances of the applications will be deleted.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
        lockApplicationOrThrow(id, application -> {
            var deployments = application.get().instances().values().stream()
                                         .filter(instance -> ! instance.deployments().isEmpty())
                                         .collect(toMap(instance -> instance.name(),
                                                        instance -> instance.deployments().keySet().stream()
                                                                            .map(ZoneId::toString)
                                                                            .collect(joining(", "))));
            if ( ! deployments.isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);

            for (Instance instance : application.get().instances().values()) {
                controller.routing().removeEndpointsInDns(application.get(), instance.name());
                application = application.without(instance.name());
            }

            applicationStore.removeAll(id.tenant(), id.application());
            applicationStore.removeAllTesters(id.tenant(), id.application());
            applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());

            accessControl.deleteApplication(id, credentials);
            curator.removeApplication(id);

            controller.jobController().collectGarbage();
            log.info("Deleted " + id);
        });
    }

    /**
     * Deletes the given application instance.
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
 */
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        // Silently a no-op when the application does not exist.
        getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
    }
}

/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        action.accept(new LockedApplication(requireApplication(applicationId), lock));
    }
}

/**
 * Tells config server to schedule a restart of all nodes in this deployment.
 *
 * @param restartFilter Variables to filter which nodes to restart.
 */
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
    configServer.restart(deploymentId, restartFilter);
}

/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    try {
        return configServer.isSuspended(deploymentId);
    }
    catch (ConfigServerException e) {
        // An unknown deployment cannot be suspended; any other config server error is propagated.
        if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
            return false;
        throw e;
    }
}

/** Deactivate application in the given zone. */
public void deactivate(ApplicationId id, ZoneId zone) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id),
                           application -> store(deactivate(application, id.instance(), zone)));
}

/**
 * Deactivates a locked application without storing it.
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
    DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
    try {
        configServer.deactivate(id);
    }
    catch (NotFoundException ignored) {
        // Already gone on the config server; the cleanup in the finally block still runs.
    }
    finally {
        // Routing policies are refreshed whether deactivation succeeded or not.
        controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
        if (zone.environment().isManuallyDeployed())
            applicationStore.putMetaTombstone(id, clock.instant());
    }
    return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}

public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

// Maps the name to its underscore variant; used to detect applications/instances whose names differ
// only by dash vs underscore (see the duplicate checks in createApplication and withNewInstance).
private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
    return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
}

private ApplicationId dashToUnderscore(ApplicationId id) {
    return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
}

/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
*/ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** Verify that we don't downgrade an existing production deployment. */ private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) { Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || applicationVersion.compareTo(deployment.applicationVersion()) < 0)) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", instance.id(), zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion())); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. 
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName tenant where application should be deployed
 * @param applicationPackage application package
 * @param deployer principal initiating the deployment, possibly empty
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName,
                                                   Optional<ZoneId> zoneId, ApplicationPackage applicationPackage,
                                                   Optional<Principal> deployer) {
    Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                              .map(domain -> new AthenzDomain(domain.value()));
    // Nothing to verify unless the package declares an Athenz identity.
    if(identityDomain.isEmpty()) {
        return;
    }
    if(! (accessControl instanceof AthenzFacade)) {
        throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
    }
    verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());

    Optional<AthenzUser> athenzUser = getUser(deployer);
    if (athenzUser.isPresent()) {
        // A user deployer must be able to launch the service for the target zone, or be a tenant domain admin.
        var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
        // Instance-level service declaration overrides the application-level default.
        var serviceToLaunch = instanceName
                .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                .or(() -> applicationPackage.deploymentSpec().athenzService())
                .map(service -> new AthenzService(identityDomain.get(), service.value()));

        if(serviceToLaunch.isPresent()) {
            if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get())
                 && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) {
                throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                   "service " + serviceToLaunch.get().getFullName() + ". " +
                                                   "Please reach out to the domain admin.");
            }
        } else {
            throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
        }
    } else {
        // Non-user deployers: the tenant's Athenz domain must match the one declared in deployment.xml.
        Tenant tenant = controller.tenants().require(tenantName);
        AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
        if ( ! Objects.equals(tenantDomain, identityDomain.get()))
            throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                               "must match tenant domain: [" + tenantDomain.getName() + "]");
    }
}

/*
 * Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
 */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    return deployer
            .filter(AthenzPrincipal.class::isInstance)
            .map(AthenzPrincipal.class::cast)
            .map(AthenzPrincipal::getIdentity)
            .filter(AthenzUser.class::isInstance)
            .map(AthenzUser.class::cast);
}

/*
 * Verifies that the configured Athenz service (if any) can be launched.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
    deploymentSpec.athenzDomain().ifPresent(domain -> {
        // For every reachable zone, the config server's identity must be able to launch each declared service,
        // both the application-level default and any instance-level overrides.
        controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
            AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
            deploymentSpec.athenzService().ifPresent(service -> {
                verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
            });
            deploymentSpec.instances().forEach(spec -> {
                spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
            });
        });
    });
}

private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
    if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
        throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}

/** Returns the latest known version within the given major. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    return controller.readVersionStatus().versions().stream()
                     .map(VespaVersion::versionNumber)
                     .filter(version -> version.getMajor() == targetMajorVersion)
                     .max(naturalOrder());
}

/** Extract deployment warnings metric from deployment result. */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    if (result.prepareResponse().log == null)
        return Map.of();

    Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
    for (Log log : result.prepareResponse().log) {
        // Only entries logged at warning level are counted.
        if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;

        warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
    }
    return Map.copyOf(warnings);
}

}
Yes, the runs are ordered by increasing run number, so the descending map iterator starts with the latest run.
private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); }
if (run.status() == RunStatus.success)
private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificateManager endpointCertificateManager; private final StringFlag dockerImageRepoFlag; private final BooleanFlag provisionApplicationRoles; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, SecretStore secretStore, FlagSource flagSource, BillingController billingController) { this.controller = controller; this.curator = curator; this.accessControl = accessControl; this.configServer = controller.serviceRegistry().configServer(); this.clock = clock; this.artifactRepository = controller.serviceRegistry().artifactRepository(); this.applicationStore = controller.serviceRegistry().applicationStore(); this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource); this.billingController = billingController; deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore, controller.serviceRegistry().endpointCertificateProvider(), clock, flagSource); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : 
curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)) .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId)); } /** Enables reindexing for the given application in the given zone. 
*/ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. */ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all content clusters in all current deployments of the given application. 
*/ public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), job.application(), EnumSet.of(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) { if (id.instance().isTester()) throw new IllegalArgumentException("'" + id + "' is a tester application!"); InstanceId.validate(id.instance().value()); if (getInstance(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists"); if (getInstance(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists"); log.info("Created " + id); return application.withNewInstance(id.instance()); } public ActivateResult deploy(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options); } /** Deploys an application package for an existing application instance. 
*/ public ActivateResult deploy2(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Optional<ApplicationRoles> applicationRoles = Optional.empty(); Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || revision.compareTo(deployment.applicationVersion()) < 0 && ! 
(revision.isUnknown() && controller.system().isCd()))) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion())); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name())); endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone); if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) { try { applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id()); } catch (Exception e) { log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e); throw new RuntimeException("Unable to provision iam roles for application"); } } } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints, endpointCertificateMetadata, applicationRoles); var quotaUsage = deploymentQuotaUsage(zone, job.application()); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } private 
ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) { return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone) : applicationStore.get(application.tenant(), application.application(), revision)); } public ActivateResult deploy(ApplicationId instanceId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, Optional<ApplicationVersion> applicationVersionFromDeployer, DeployOptions options) { if (instanceId.instance().isTester()) throw new IllegalArgumentException("'" + instanceId + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId); if (getInstance(instanceId).isEmpty()) createInstance(instanceId); try (Lock deploymentLock = lockForDeployment(instanceId, zone)) { Version platformVersion; ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); InstanceName instance = instanceId.instance(); boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed(); boolean preferOldestVersion = options.deployCurrentVersion; if (manuallyDeployed) { applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown); applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package must be given when deploying to " + zone)); platformVersion = options.vespaVersion.map(Version::new) .orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(this::lastCompatibleVersion) .orElseGet(controller::readSystemVersion)); } else { JobType jobType = JobType.from(controller.system(), zone) .orElseThrow(() -> new IllegalArgumentException("No job is known for " 
+ zone + ".")); var run = controller.jobController().last(instanceId, jobType); if (run.map(Run::hasEnded).orElse(true)) return unexpectedDeployment(instanceId, zone); Versions versions = run.get().versions(); platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); applicationPackage = getApplicationPackage(instanceId, applicationVersion); applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType); validateRun(application.get().require(instance), zone, platformVersion, applicationVersion); } endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata( application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance)); endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone); } ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints, endpointCertificateMetadata, Optional.empty()); var quotaUsage = deploymentQuotaUsage(zone, instanceId); lockApplicationOrThrow(applicationId, application -> store(application.with(instanceId.instance(), instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) { if (applicationPackage.trustedCertificates().isEmpty()) return applicationPackage; Run run = controller.jobController().last(id, type) .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found")); if (run.testerCertificate().isEmpty()) return applicationPackage; return applicationPackage.withTrustedCertificate(run.testerCertificate().get()); } /** Fetches the 
requested application package from the artifact store(s). */ public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); } /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */ public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } for (InstanceName instance : declaredInstances) if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod)) application = controller.routing().assignRotations(application, instance); store(application); return application; } /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( 
artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty()); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. */ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty()); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, Optional<ApplicationRoles> applicationRoles) { try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(new DeploymentId(application, zone), clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, 
endpointCertificateMetadata, dockerImageRepo, domain, applicationRoles, deploymentQuota)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); } } private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = List.of(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of()); return new ActivateResult(new RevisionId("0"), prepareResponse, 0); } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the given application. All known instances of the application will be deleted. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); accessControl.deleteApplication(id, credentials); curator.removeApplication(id); controller.jobController().collectGarbage(); log.info("Deleted " + id); }); } /** * Deletes the given application instance. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. 
*/ public boolean isSuspended(DeploymentId deploymentId) { try { return configServer.isSuspended(deploymentId); } catch (ConfigServerException e) { if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND) return false; throw e; } } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. 
*/ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** Verify that we don't downgrade an existing production deployment. */ private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) { Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || applicationVersion.compareTo(deployment.applicationVersion()) < 0)) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", instance.id(), zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion())); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. 
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. */ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! 
((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { return controller.readVersionStatus().versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificateManager endpointCertificateManager; private final StringFlag dockerImageRepoFlag; private final BooleanFlag provisionApplicationRoles; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, SecretStore secretStore, FlagSource flagSource, BillingController billingController) { this.controller = controller; this.curator = curator; this.accessControl = accessControl; this.configServer = controller.serviceRegistry().configServer(); this.clock = clock; this.artifactRepository = controller.serviceRegistry().artifactRepository(); this.applicationStore = controller.serviceRegistry().applicationStore(); this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); this.provisionApplicationRoles = Flags.PROVISION_APPLICATION_ROLES.bindTo(flagSource); this.billingController = billingController; deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator, secretStore, controller.serviceRegistry().endpointCertificateProvider(), clock, flagSource); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : 
curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)) .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId)); } /** Enables reindexing for the given application in the given zone. 
*/ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. */ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController#asList()} this ignores * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all content clusters in all current deployments of the given application. 
*/ public Map<ZoneId, List<String>> contentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), job.application(), EnumSet.of(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } public LockedApplication withNewInstance(LockedApplication application, ApplicationId id) { if (id.instance().isTester()) throw new IllegalArgumentException("'" + id + "' is a tester application!"); InstanceId.validate(id.instance().value()); if (getInstance(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance already exists"); if (getInstance(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Instance " + dashToUnderscore(id) + " already exists"); log.info("Created " + id); return application.withNewInstance(id.instance()); } public ActivateResult deploy(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options); } /** Deploys an application package for an existing application instance. 
*/ public ActivateResult deploy2(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Optional<ApplicationRoles> applicationRoles = Optional.empty(); Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platform.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || revision.compareTo(deployment.applicationVersion()) < 0 && ! 
(revision.isUnknown() && controller.system().isCd()))) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion())); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name())); endpoints = controller.routing().registerEndpointsInDns(application.get(), job.application().instance(), zone); if (provisionApplicationRoles.with(FetchVector.Dimension.ZONE_ID, zone.value()).value()) { try { applicationRoles = controller.serviceRegistry().applicationRoleService().createApplicationRoles(instance.id()); } catch (Exception e) { log.log(Level.SEVERE, "Exception creating application roles for application: " + instance.id(), e); throw new RuntimeException("Unable to provision iam roles for application"); } } } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, endpoints, endpointCertificateMetadata, applicationRoles); var quotaUsage = deploymentQuotaUsage(zone, job.application()); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } private 
ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) { return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone) : applicationStore.get(application.tenant(), application.application(), revision)); } public ActivateResult deploy(ApplicationId instanceId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, Optional<ApplicationVersion> applicationVersionFromDeployer, DeployOptions options) { if (instanceId.instance().isTester()) throw new IllegalArgumentException("'" + instanceId + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId); if (getInstance(instanceId).isEmpty()) createInstance(instanceId); try (Lock deploymentLock = lockForDeployment(instanceId, zone)) { Version platformVersion; ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Set<ContainerEndpoint> endpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); InstanceName instance = instanceId.instance(); boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed(); boolean preferOldestVersion = options.deployCurrentVersion; if (manuallyDeployed) { applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown); applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package must be given when deploying to " + zone)); platformVersion = options.vespaVersion.map(Version::new) .orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(this::lastCompatibleVersion) .orElseGet(controller::readSystemVersion)); } else { JobType jobType = JobType.from(controller.system(), zone) .orElseThrow(() -> new IllegalArgumentException("No job is known for " 
+ zone + ".")); var run = controller.jobController().last(instanceId, jobType); if (run.map(Run::hasEnded).orElse(true)) return unexpectedDeployment(instanceId, zone); Versions versions = run.get().versions(); platformVersion = preferOldestVersion ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); applicationVersion = preferOldestVersion ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); applicationPackage = getApplicationPackage(instanceId, applicationVersion); applicationPackage = withTesterCertificate(applicationPackage, instanceId, jobType); validateRun(application.get().require(instance), zone, platformVersion, applicationVersion); } endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata( application.get().require(instance), zone, applicationPackage.deploymentSpec().instance(instance)); endpoints = controller.routing().registerEndpointsInDns(application.get(), instance, zone); } ActivateResult result = deploy(instanceId, applicationPackage, zone, platformVersion, endpoints, endpointCertificateMetadata, Optional.empty()); var quotaUsage = deploymentQuotaUsage(zone, instanceId); lockApplicationOrThrow(applicationId, application -> store(application.with(instanceId.instance(), instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) { if (applicationPackage.trustedCertificates().isEmpty()) return applicationPackage; Run run = controller.jobController().last(id, type) .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found")); if (run.testerCertificate().isEmpty()) return applicationPackage; return applicationPackage.withTrustedCertificate(run.testerCertificate().get()); } /** Fetches the 
requested application package from the artifact store(s). */ public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); } /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */ public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } for (InstanceName instance : declaredInstances) if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod)) application = controller.routing().assignRotations(application, instance); store(application); return application; } /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( 
artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty()); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. */ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty()); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, Optional<ApplicationRoles> applicationRoles) { try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(new DeploymentId(application, zone), clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, 
endpointCertificateMetadata, dockerImageRepo, domain, applicationRoles, deploymentQuota)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); } } private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = List.of(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of(), List.of()); return new ActivateResult(new RevisionId("0"), prepareResponse, 0); } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); accessControl.deleteApplication(id, credentials); curator.removeApplication(id); controller.jobController().collectGarbage(); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. 
*/ public boolean isSuspended(DeploymentId deploymentId) { try { return configServer.isSuspended(deploymentId); } catch (ConfigServerException e) { if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND) return false; throw e; } } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. 
*/ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** Verify that we don't downgrade an existing production deployment. */ private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) { Deployment deployment = instance.deployments().get(zone); if ( zone.environment().isProduction() && deployment != null && ( platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned() || applicationVersion.compareTo(deployment.applicationVersion()) < 0)) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", instance.id(), zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion())); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. 
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. */ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! 
((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { return controller.readVersionStatus().versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
Oops — this was not intended to be here, but it is just the removal of dead code that should not have been there in the first place.
/**
 * Builds the parameters for a document operation from the given request.
 * <p>
 * The trace level property is always read; in addition, only the properties listed in
 * {@code names} are read from the request. Properties absent from the request leave the
 * corresponding parameter at its default.
 *
 * @param request the HTTP request whose properties are read
 * @param names   the property names to consume — each must be CLUSTER, FIELD_SET or ROUTE
 * @return parameters with the present properties applied
 * @throws IllegalArgumentException if any entry in {@code names} is not one of the three known names
 */
private DocumentOperationParameters parametersFromRequest(HttpRequest request, String... names) {
    DocumentOperationParameters parameters = getProperty(request, TRACELEVEL, integerParser).map(parameters()::withTraceLevel)
                                                                                            .orElse(parameters());
    for (String name : names) switch (name) {
        case CLUSTER:
            // A cluster property is resolved against the configured clusters, and its route is used.
            // If the resolved route is absent, the parameters are left unchanged by orElse.
            parameters = getProperty(request, CLUSTER).map(cluster -> resolveCluster(Optional.of(cluster), clusters).route())
                                                      .map(parameters::withRoute)
                                                      .orElse(parameters);
            break;
        case FIELD_SET:
            parameters = getProperty(request, FIELD_SET).map(parameters::withFieldSet)
                                                        .orElse(parameters);
            break;
        case ROUTE:
            parameters = getProperty(request, ROUTE).map(parameters::withRoute)
                                                    .orElse(parameters);
            break;
        default:
            // Reaching this is a programming error in the caller, not bad user input.
            throw new IllegalArgumentException("Unrecognized document operation parameter name '" + name + "'");
    }
    return parameters;
}
break;
/**
 * Reads the trace level and each of the named properties from the given request into
 * document operation parameters.
 * <p>
 * The trace level is read unconditionally; the remaining properties only when listed
 * in {@code names}. A property missing from the request leaves its parameter untouched.
 *
 * @param request the HTTP request whose properties are read
 * @param names   the property names to consume — each must be CLUSTER, FIELD_SET or ROUTE
 * @return parameters with the present properties applied
 * @throws IllegalArgumentException if any entry in {@code names} is not one of the three known names
 */
private DocumentOperationParameters parametersFromRequest(HttpRequest request, String... names) {
    DocumentOperationParameters result = getProperty(request, TRACELEVEL, integerParser)
            .map(parameters()::withTraceLevel)
            .orElse(parameters());
    for (String name : names) {
        if (name.equals(CLUSTER)) {
            // The cluster name is resolved against the configured clusters; its route, when
            // present, replaces the route — otherwise the parameters stay as they were.
            result = getProperty(request, CLUSTER)
                    .map(spec -> resolveCluster(Optional.of(spec), clusters).route())
                    .map(result::withRoute)
                    .orElse(result);
        }
        else if (name.equals(FIELD_SET)) {
            result = getProperty(request, FIELD_SET)
                    .map(result::withFieldSet)
                    .orElse(result);
        }
        else if (name.equals(ROUTE)) {
            result = getProperty(request, ROUTE)
                    .map(result::withRoute)
                    .orElse(result);
        }
        else {
            // Only reachable through a caller bug: the names are compile-time constants.
            throw new IllegalArgumentException("Unrecognized document operation parameter name '" + name + "'");
        }
    }
    return result;
}
class DocumentV1ApiHandler extends AbstractRequestHandler { private static final Duration defaultTimeout = Duration.ofSeconds(175); private static final Logger log = Logger.getLogger(DocumentV1ApiHandler.class.getName()); private static final Parser<Integer> integerParser = Integer::parseInt; private static final Parser<Long> timeoutMillisParser = value -> ParameterParser.asMilliSeconds(value, defaultTimeout.toMillis()); private static final Parser<Boolean> booleanParser = Boolean::parseBoolean; private static final CompletionHandler logException = new CompletionHandler() { @Override public void completed() { } @Override public void failed(Throwable t) { log.log(FINE, "Exception writing or closing response data", t); } }; private static final ContentChannel ignoredContent = new ContentChannel() { @Override public void write(ByteBuffer buf, CompletionHandler handler) { handler.completed(); } @Override public void close(CompletionHandler handler) { handler.completed(); } }; private static final JsonFactory jsonFactory = new JsonFactory(); private static final String CREATE = "create"; private static final String CONDITION = "condition"; private static final String ROUTE = "route"; private static final String FIELD_SET = "fieldSet"; private static final String SELECTION = "selection"; private static final String CLUSTER = "cluster"; private static final String CONTINUATION = "continuation"; private static final String WANTED_DOCUMENT_COUNT = "wantedDocumentCount"; private static final String CONCURRENCY = "concurrency"; private static final String BUCKET_SPACE = "bucketSpace"; private static final String TIMEOUT = "timeout"; private static final String TRACELEVEL = "tracelevel"; private final Clock clock; private final Metric metric; private final DocumentApiMetrics metrics; private final DocumentOperationParser parser; private final long maxThrottled; private final DocumentAccess access; private final AsyncSession asyncSession; private final Map<String, 
StorageCluster> clusters; private final Deque<Operation> operations; private final AtomicLong enqueued = new AtomicLong(); private final Map<VisitorControlHandler, VisitorSession> visits = new ConcurrentHashMap<>(); private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("document-api-handler-")); private final Map<String, Map<Method, Handler>> handlers = defineApi(); @Inject public DocumentV1ApiHandler(Metric metric, MetricReceiver metricReceiver, VespaDocumentAccess documentAccess, DocumentmanagerConfig documentManagerConfig, ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig, DocumentOperationExecutorConfig executorConfig) { this(Clock.systemUTC(), metric, metricReceiver, documentAccess, documentManagerConfig, executorConfig, clusterListConfig, bucketSpacesConfig); } DocumentV1ApiHandler(Clock clock, Metric metric, MetricReceiver metricReceiver, DocumentAccess access, DocumentmanagerConfig documentmanagerConfig, DocumentOperationExecutorConfig executorConfig, ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig) { this.clock = clock; this.parser = new DocumentOperationParser(documentmanagerConfig); this.metric = metric; this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1"); this.maxThrottled = executorConfig.maxThrottled(); this.access = access; this.asyncSession = access.createAsyncSession(new AsyncParameters()); this.clusters = parseClusters(clusterListConfig, bucketSpacesConfig); this.operations = new ConcurrentLinkedDeque<>(); this.executor.scheduleWithFixedDelay(this::dispatchEnqueued, executorConfig.resendDelayMillis(), executorConfig.resendDelayMillis(), TimeUnit.MILLISECONDS); } @Override public ContentChannel handleRequest(Request rawRequest, ResponseHandler rawResponseHandler) { HandlerMetricContextUtil.onHandle(rawRequest, metric, getClass()); ResponseHandler responseHandler = response -> { 
HandlerMetricContextUtil.onHandled(rawRequest, metric, getClass()); return rawResponseHandler.handleResponse(response); }; HttpRequest request = (HttpRequest) rawRequest; try { request.setTimeout(getProperty(request, TIMEOUT, timeoutMillisParser) .orElse(defaultTimeout.toMillis()), TimeUnit.MILLISECONDS); Path requestPath = new Path(request.getUri()); for (String path : handlers.keySet()) if (requestPath.matches(path)) { Map<Method, Handler> methods = handlers.get(path); if (methods.containsKey(request.getMethod())) return methods.get(request.getMethod()).handle(request, new DocumentPath(requestPath), responseHandler); if (request.getMethod() == OPTIONS) options(methods.keySet(), responseHandler); methodNotAllowed(request, methods.keySet(), responseHandler); } notFound(request, handlers.keySet(), responseHandler); } catch (IllegalArgumentException e) { badRequest(request, e, responseHandler); } catch (RuntimeException e) { serverError(request, e, responseHandler); } return ignoredContent; } @Override public void handleTimeout(Request request, ResponseHandler responseHandler) { timeout((HttpRequest) request, "Request timeout after " + request.getTimeout(TimeUnit.MILLISECONDS) + "ms", responseHandler); } @Override public void destroy() { executor.shutdown(); Instant doom = clock.instant().plus(Duration.ofSeconds(20)); while ( ! operations.isEmpty() && clock.instant().isBefore(doom)) dispatchEnqueued(); if ( ! operations.isEmpty()) log.log(WARNING, "Failed to empty request queue before shutdown timeout — " + operations.size() + " requests left"); asyncSession.destroy(); visits.values().forEach(VisitorSession::destroy); try { if ( ! 
executor.awaitTermination(Duration.between(clock.instant(), doom).toMillis(), TimeUnit.MILLISECONDS)) executor.shutdownNow(); } catch (InterruptedException e) { log.log(WARNING, "Interrupted waiting for /document/v1 executor to shut down"); } } @FunctionalInterface interface Handler { ContentChannel handle(HttpRequest request, DocumentPath path, ResponseHandler handler); } /** Defines all paths/methods handled by this handler. */ private Map<String, Map<Method, Handler>> defineApi() { Map<String, Map<Method, Handler>> handlers = new LinkedHashMap<>(); handlers.put("/document/v1/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/docid/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/docid/{*}", Map.of(GET, this::getDocument, POST, this::postDocument, PUT, this::putDocument, DELETE, this::deleteDocument)); handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/{*}", Map.of(GET, this::getDocument, POST, this::postDocument, PUT, this::putDocument, DELETE, this::deleteDocument)); handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/{*}", Map.of(GET, this::getDocument, POST, this::postDocument, PUT, this::putDocument, DELETE, this::deleteDocument)); return Collections.unmodifiableMap(handlers); } private ContentChannel getDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) { enqueueAndDispatch(request, handler, () -> { VisitorParameters parameters = parseParameters(request, path); return () -> { visit(request, parameters, handler); return true; }; }); return ignoredContent; } private ContentChannel getDocument(HttpRequest request, DocumentPath path, ResponseHandler handler) { enqueueAndDispatch(request, handler, () -> { 
DocumentOperationParameters rawParameters = parametersFromRequest(request, CLUSTER, FIELD_SET); if (rawParameters.fieldSet().isEmpty()) rawParameters = rawParameters.withFieldSet(path.documentType().orElseThrow() + ":[document]"); DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> { handle(path, handler, response, (document, jsonResponse) -> { if (document != null) { jsonResponse.writeSingleDocument(document); jsonResponse.commit(Response.Status.OK); } else jsonResponse.commit(Response.Status.NOT_FOUND); }); }); return () -> dispatchOperation(() -> asyncSession.get(path.id(), parameters)); }); return ignoredContent; } private ContentChannel postDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) { ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.PUT, clock.instant()); return new ForwardingContentChannel(in -> { enqueueAndDispatch(request, handler, () -> { DocumentPut put = parser.parsePut(in, path.id().toString()); getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(put::setCondition); DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE) .withResponseHandler(response -> handle(path, handler, response)); return () -> dispatchOperation(() -> asyncSession.put(put, parameters)); }); }); } private ContentChannel putDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) { ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.UPDATE, clock.instant()); return new ForwardingContentChannel(in -> { enqueueAndDispatch(request, handler, () -> { DocumentUpdate update = parser.parseUpdate(in, path.id().toString()); getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(update::setCondition); getProperty(request, CREATE, booleanParser).ifPresent(update::setCreateIfNonExistent); DocumentOperationParameters 
parameters = parametersFromRequest(request, ROUTE) .withResponseHandler(response -> handle(path, handler, response)); return () -> dispatchOperation(() -> asyncSession.update(update, parameters)); }); }); } private ContentChannel deleteDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) { ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.REMOVE, clock.instant()); enqueueAndDispatch(request, handler, () -> { DocumentRemove remove = new DocumentRemove(path.id()); getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(remove::setCondition); DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE) .withResponseHandler(response -> handle(path, handler, response)); return () -> dispatchOperation(() -> asyncSession.remove(remove, parameters)); }); return ignoredContent; } /** Dispatches enqueued requests until one is blocked. */ void dispatchEnqueued() { try { while (dispatchFirst()); } catch (Exception e) { log.log(WARNING, "Uncaught exception in /document/v1 dispatch thread", e); } } /** Attempts to dispatch the first enqueued operations, and returns whether this was successful. */ private boolean dispatchFirst() { Operation operation = operations.poll(); if (operation == null) return false; if (operation.dispatch()) { enqueued.decrementAndGet(); return true; } operations.push(operation); return false; } /** * Enqueues the given request and operation, or responds with "overload" if the queue is full, * and then attempts to dispatch an enqueued operation from the head of the queue. 
*/ private void enqueueAndDispatch(HttpRequest request, ResponseHandler handler, Supplier<Supplier<Boolean>> operationParser) { if (enqueued.incrementAndGet() > maxThrottled) { enqueued.decrementAndGet(); overload(request, "Rejecting execution due to overload: " + maxThrottled + " requests already enqueued", handler); return; } operations.offer(new Operation(request, handler) { @Override Supplier<Boolean> parse() { return operationParser.get(); } }); dispatchFirst(); } /** Class for writing and returning JSON responses to document operations in a thread safe manner. */ private static class JsonResponse implements AutoCloseable { private final BufferedContentChannel buffer = new BufferedContentChannel(); private final OutputStream out = new ContentChannelOutputStream(buffer); private final JsonGenerator json = jsonFactory.createGenerator(out); private final ResponseHandler handler; private ContentChannel channel; private JsonResponse(ResponseHandler handler) throws IOException { this.handler = handler; json.writeStartObject(); } /** Creates a new JsonResponse with path and id fields written. */ static JsonResponse create(DocumentPath path, ResponseHandler handler) throws IOException { JsonResponse response = new JsonResponse(handler); response.writePathId(path.rawPath()); response.writeDocId(path.id()); return response; } /** Creates a new JsonResponse with path field written. */ static JsonResponse create(HttpRequest request, ResponseHandler handler) throws IOException { JsonResponse response = new JsonResponse(handler); response.writePathId(request.getUri().getRawPath()); return response; } /** Creates a new JsonResponse with path and message fields written. 
*/
static JsonResponse create(HttpRequest request, String message, ResponseHandler handler) throws IOException {
    JsonResponse response = new JsonResponse(handler);
    response.writePathId(request.getUri().getRawPath());
    response.writeMessage(message);
    return response;
}

/** Commits a response with the given status code and some default headers, and writes whatever content is buffered. */
synchronized void commit(int status) throws IOException {
    Response response = new Response(status);
    response.headers().addAll(Map.of("Content-Type", List.of("application/json; charset=UTF-8")));
    try {
        channel = handler.handleResponse(response);
        buffer.connectTo(channel); // flushes buffered JSON to the real channel from here on
    }
    catch (RuntimeException e) {
        throw new IOException(e);
    }
}

/** Commits a response with the given status code and some default headers, writes buffered content, and closes this. */
synchronized void respond(int status) throws IOException {
    try (this) {
        commit(status);
    }
}

/** Closes the JSON and the output content channel of this. */
@Override
public synchronized void close() throws IOException {
    try {
        // Guard against close-before-commit: commit a 500 so the client still gets a response.
        if (channel == null) {
            log.log(WARNING, "Close called before response was committed, in " + getClass().getName());
            commit(Response.Status.INTERNAL_SERVER_ERROR);
        }
        json.close();
        out.close();
    }
    finally {
        if (channel != null)
            channel.close(logException);
    }
}

synchronized void writePathId(String path) throws IOException {
    json.writeStringField("pathId", path);
}

synchronized void writeMessage(String message) throws IOException {
    json.writeStringField("message", message);
}

synchronized void writeDocId(DocumentId id) throws IOException {
    json.writeStringField("id", id.toString());
}

synchronized void writeTrace(Trace trace) throws IOException {
    if (trace != null && ! trace.getRoot().isEmpty()) {
        writeTrace(trace.getRoot());
    }
}

// Recursively writes a trace node: a note becomes a "message" field; strict children
// are written under "trace", non-strict (parallel) ones under "fork".
private void writeTrace(TraceNode node) throws IOException {
    if (node.hasNote())
        json.writeStringField("message", node.getNote());
    if ( ! node.isLeaf()) {
        json.writeArrayFieldStart(node.isStrict() ? "trace" : "fork");
        for (int i = 0; i < node.getNumChildren(); i++) {
            json.writeStartObject();
            writeTrace(node.getChild(i));
            json.writeEndObject();
        }
        json.writeEndArray();
    }
}

synchronized void writeSingleDocument(Document document) throws IOException {
    new JsonWriter(json).writeFields(document);
}

synchronized void writeDocumentsArrayStart() throws IOException {
    json.writeArrayFieldStart("documents");
}

synchronized void writeDocumentValue(Document document) throws IOException {
    new JsonWriter(json).write(document);
}

synchronized void writeArrayEnd() throws IOException {
    json.writeEndArray();
}

synchronized void writeContinuation(String token) throws IOException {
    json.writeStringField("continuation", token);
}

}

/** Responds 204 No Content with an Allow header listing the methods supported at the requested path. */
private static void options(Collection<Method> methods, ResponseHandler handler) {
    loggingException(() -> {
        Response response = new Response(Response.Status.NO_CONTENT);
        response.headers().add("Allow", methods.stream().sorted().map(Method::name).collect(joining(",")));
        handler.handleResponse(response).close(logException);
    });
}

/** Responds 400 Bad Request with the exception's message. */
private static void badRequest(HttpRequest request, IllegalArgumentException e, ResponseHandler handler) {
    loggingException(() -> {
        String message = Exceptions.toMessageString(e);
        log.log(FINE, () -> "Bad request for " + request.getMethod() + " at " + request.getUri().getRawPath() + ": " + message);
        JsonResponse.create(request, message, handler).respond(Response.Status.BAD_REQUEST);
    });
}

/** Responds 404 Not Found, listing the known paths. */
private static void notFound(HttpRequest request, Collection<String> paths, ResponseHandler handler) {
    loggingException(() -> {
        JsonResponse.create(request,
                            "Nothing at '" + request.getUri().getRawPath() + "'. " +
                            "Available paths are:\n" + String.join("\n", paths),
                            handler)
                    .respond(Response.Status.NOT_FOUND);
    });
}

/** Responds 405 Method Not Allowed, listing the methods allowed at the requested path. */
private static void methodNotAllowed(HttpRequest request, Collection<Method> methods, ResponseHandler handler) {
    loggingException(() -> {
        JsonResponse.create(request,
                            "'" + request.getMethod() + "' not allowed at '" + request.getUri().getRawPath() + "'. " +
                            "Allowed methods are: " + methods.stream().sorted().map(Method::name).collect(joining(", ")),
                            handler)
                    .respond(Response.Status.METHOD_NOT_ALLOWED);
    });
}

/** Responds 429 Too Many Requests; used when the operation queue is full. */
private static void overload(HttpRequest request, String message, ResponseHandler handler) {
    loggingException(() -> {
        log.log(FINE, () -> "Overload handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message);
        JsonResponse.create(request, message, handler).respond(Response.Status.TOO_MANY_REQUESTS);
    });
}

/** Responds 500 Internal Server Error with the throwable's message, and logs it at WARNING. */
private static void serverError(HttpRequest request, Throwable t, ResponseHandler handler) {
    loggingException(() -> {
        log.log(WARNING, "Uncaught exception handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ":", t);
        JsonResponse.create(request, Exceptions.toMessageString(t), handler).respond(Response.Status.INTERNAL_SERVER_ERROR);
    });
}

/** Responds 504 Gateway Timeout with the given message. */
private static void timeout(HttpRequest request, String message, ResponseHandler handler) {
    loggingException(() -> {
        log.log(FINE, () -> "Timeout handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message);
        JsonResponse.create(request, message, handler).respond(Response.Status.GATEWAY_TIMEOUT);
    });
}

/** Runs the given task, logging (at FINE) rather than propagating any exception it throws. */
private static void loggingException(Exceptions.RunnableThrowingIOException runnable) {
    try {
        runnable.run();
    }
    catch (Exception e) {
        log.log(FINE, "Failed writing response", e);
    }
}

/** A queued document API operation; parsing of the request body is deferred until first dispatch. */
private static abstract class Operation {

    private final Lock lock = new ReentrantLock();
    private final HttpRequest request;
    private final ResponseHandler handler;
    private Supplier<Boolean> operation; // created lazily on first dispatch

    Operation(HttpRequest request, ResponseHandler handler) {
        this.request = request;
        this.handler = handler;
    }

    /**
     * Attempts to dispatch this operation to the document API, and returns whether this completed or not.
     * This returns {@code true} if dispatch was successful, or if it failed fatally; or {@code false} if
     * dispatch should be retried at a later time.
     */
    boolean dispatch() {
        if (request.isCancelled())
            return true;

        // Dispatch is expected to be single-threaded per operation; a failed tryLock is a programming error.
        if ( ! lock.tryLock())
            throw new IllegalStateException("Concurrent attempts at dispatch — this is a bug");

        try {
            if (operation == null)
                operation = parse();

            return operation.get();
        }
        catch (IllegalArgumentException e) {
            badRequest(request, e, handler);
        }
        catch (RuntimeException e) {
            serverError(request, e, handler);
        }
        finally {
            lock.unlock();
        }
        return true; // parse or dispatch failed fatally — the response has been sent, so this is complete
    }

    /** Parses the request into a dispatchable operation; called at most once. */
    abstract Supplier<Boolean> parse();

}

/** Attempts to send the given document operation, returning false if this needs to be retried. */
private static boolean dispatchOperation(Supplier<Result> documentOperation) {
    Result result = documentOperation.get();
    if (result.type() == Result.ResultType.TRANSIENT_ERROR)
        return false; // session is saturated — retry later

    if (result.type() == Result.ResultType.FATAL_ERROR)
        throw new RuntimeException(result.getError());

    return true;
}

/** Readable content channel which forwards data to a reader when closed. */
static class ForwardingContentChannel implements ContentChannel {

    private final ReadableContentChannel delegate = new ReadableContentChannel();
    private final Consumer<InputStream> reader;

    public ForwardingContentChannel(Consumer<InputStream> reader) {
        this.reader = reader;
    }

    /** Write is complete when we have stored the buffer — call completion handler. */
    @Override
    public void write(ByteBuffer buf, CompletionHandler handler) {
        try {
            delegate.write(buf, logException);
            handler.completed();
        }
        catch (Exception e) {
            handler.failed(e);
        }
    }

    /** Close is complete when we have closed the buffer. */
    @Override
    public void close(CompletionHandler handler) {
        try {
            delegate.close(logException);
            // Only now, with the complete request body buffered, is the reader invoked.
            reader.accept(new UnsafeContentInputStream(delegate));
            handler.completed();
        }
        catch (Exception e) {
            handler.failed(e);
        }
    }

}

/** Parses single-document JSON bodies into document puts and updates. */
static class DocumentOperationParser {

    private final DocumentTypeManager manager;

    DocumentOperationParser(DocumentmanagerConfig config) {
        this.manager = new DocumentTypeManager(config);
    }

    DocumentPut parsePut(InputStream inputStream, String docId) {
        return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT);
    }

    DocumentUpdate parseUpdate(InputStream inputStream, String docId) {
        return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE);
    }

    private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) {
        return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId);
    }

}

/** Callback invoked with the response document (may be null) on a successful document API response. */
interface SuccessCallback {
    void onSuccess(Document document, JsonResponse response) throws IOException;
}

/** Translates a document API response into an HTTP response, mapping outcomes to status codes. */
private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response, SuccessCallback callback) {
    try (JsonResponse jsonResponse = JsonResponse.create(path, handler)) {
        jsonResponse.writeTrace(response.getTrace());
        if (response.isSuccess())
            callback.onSuccess((response instanceof DocumentResponse) ? ((DocumentResponse) response).getDocument() : null,
                               jsonResponse);
        else {
            jsonResponse.writeMessage(response.getTextMessage());
            switch (response.outcome()) {
                case NOT_FOUND:
                    jsonResponse.commit(Response.Status.NOT_FOUND);
                    break;
                case CONDITION_FAILED:
                    jsonResponse.commit(Response.Status.PRECONDITION_FAILED);
                    break;
                case INSUFFICIENT_STORAGE:
                    jsonResponse.commit(Response.Status.INSUFFICIENT_STORAGE);
                    break;
                default:
                    log.log(WARNING, "Unexpected document API operation outcome '" + response.outcome() + "'");
                    // fall through: unexpected outcomes are treated as errors
                case ERROR:
                    log.log(FINE, () -> "Exception performing document operation: " + response.getTextMessage());
                    jsonResponse.commit(Response.Status.INTERNAL_SERVER_ERROR);
            }
        }
    }
    catch (Exception e) {
        log.log(FINE, "Failed writing response", e);
    }
}

/** As above, but with a success callback which simply commits a 200 OK. */
private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response) {
    handle(path, handler, response, (document, jsonResponse) -> jsonResponse.commit(Response.Status.OK));
}

/** Builds visitor parameters from request properties and the document path; throws IllegalArgumentException on invalid input. */
private VisitorParameters parseParameters(HttpRequest request, DocumentPath path) {
    int wantedDocumentCount = Math.min(1 << 10, getProperty(request, WANTED_DOCUMENT_COUNT, integerParser).orElse(1));
    if (wantedDocumentCount <= 0)
        throw new IllegalArgumentException("wantedDocumentCount must be positive");

    int concurrency = Math.min(100, getProperty(request, CONCURRENCY, integerParser).orElse(1));
    if (concurrency <= 0)
        throw new IllegalArgumentException("concurrency must be positive");

    Optional<String> cluster = getProperty(request, CLUSTER);
    if (cluster.isEmpty() && path.documentType().isEmpty())
        throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level");

    // Combine the user selection with constraints implied by the path into one "(a) and (b) and …" selection.
    VisitorParameters parameters = new VisitorParameters(Stream.of(getProperty(request, SELECTION),
                                                                   path.documentType(),
                                                                   path.namespace().map(value -> "id.namespace=='" + value + "'"),
                                                                   path.group().map(Group::selection))
                                                               .flatMap(Optional::stream)
                                                               .reduce(new StringJoiner(") and (", "(", ")").setEmptyValue(""),
                                                                       StringJoiner::add,
                                                                       StringJoiner::merge)
                                                               .toString());

    getProperty(request, CONTINUATION).map(ProgressToken::fromSerializedString).ifPresent(parameters::setResumeToken);
    parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME)));
    parameters.setMaxTotalHits(wantedDocumentCount);
    parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(concurrency));
    // Leave 5 seconds of the request timeout for delivering the response.
    parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - 5000));
    parameters.visitInconsistentBuckets(true);
    parameters.setPriority(DocumentProtocol.Priority.NORMAL_4);

    StorageCluster storageCluster = resolveCluster(cluster, clusters);
    parameters.setRoute(storageCluster.route());
    parameters.setBucketSpace(resolveBucket(storageCluster,
                                            path.documentType(),
                                            List.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()),
                                            getProperty(request, BUCKET_SPACE)));

    return parameters;
}

/** Runs a visitor session with the given parameters, streaming visited documents into the HTTP response. */
private void visit(HttpRequest request, VisitorParameters parameters, ResponseHandler handler) {
    try {
        JsonResponse response = JsonResponse.create(request, handler);
        response.writeDocumentsArrayStart();
        // Ensures the session is registered in 'visits' before the completion callback tries to remove it.
        CountDownLatch latch = new CountDownLatch(1);
        parameters.setLocalDataHandler(new DumpVisitorDataHandler() {
            @Override public void onDocument(Document doc, long timeStamp) {
                loggingException(() -> {
                    response.writeDocumentValue(doc);
                });
            }
            @Override public void onRemove(DocumentId id) { } // removes are not exposed over this API
        });
        parameters.setControlHandler(new VisitorControlHandler() {
            @Override public void onDone(CompletionCode code, String message) {
                super.onDone(code, message);
                loggingException(() -> {
                    response.writeArrayEnd();
                    switch (code) {
                        case TIMEOUT:
                            if ( ! hasVisitedAnyBuckets()) {
                                response.writeMessage("No buckets visited within timeout of " + parameters.getSessionTimeoutMs() + "ms (request timeout -5s)");
                                response.respond(Response.Status.GATEWAY_TIMEOUT);
                                break;
                            }
                            // fall through: a timeout after visiting some buckets is treated as a partial success
                        case SUCCESS:
                        case ABORTED:
                            if (getProgress() != null && ! getProgress().isFinished())
                                response.writeContinuation(getProgress().serializeToString());

                            response.respond(Response.Status.OK);
                            break;
                        default:
                            response.writeMessage(message != null ? message : "Visiting failed");
                            response.respond(Response.Status.INTERNAL_SERVER_ERROR);
                    }
                    // Destroy the session off this callback thread, after it is known to be in 'visits'.
                    executor.execute(() -> {
                        try {
                            latch.await();
                        }
                        catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                        }
                        visits.remove(this).destroy();
                    });
                });
            }
        });
        visits.put(parameters.getControlHandler(), access.createVisitorSession(parameters));
        latch.countDown();
    }
    catch (ParseException e) {
        badRequest(request, new IllegalArgumentException(e), handler);
    }
    catch (IOException e) {
        log.log(FINE, "Failed writing response", e);
    }
}

/**
 * Returns the last value of the named request property, or empty if the property is absent.
 *
 * @throws IllegalArgumentException if the property is present but has no non-empty value
 */
private static Optional<String> getProperty(HttpRequest request, String name) {
    if ( ! request.parameters().containsKey(name))
        return Optional.empty();

    List<String> values = request.parameters().get(name);
    String value;
    if (values == null || values.isEmpty() || (value = values.get(values.size() - 1)) == null || value.isEmpty())
        throw new IllegalArgumentException("Expected non-empty value for request property '" + name + "'");

    return Optional.of(value);
}

/** As above, but parses the property value with the given parser. */
private static <T> Optional<T> getProperty(HttpRequest request, String name, Parser<T> parser) {
    return getProperty(request, name).map(parser::parse);
}

/** A function from string to value, converting parse failures to IllegalArgumentException. */
@FunctionalInterface
interface Parser<T> extends Function<String, T> {
    default T parse(String value) {
        try {
            return apply(value);
        }
        catch (RuntimeException e) {
            throw new IllegalArgumentException("Failed parsing '" + value + "': " + Exceptions.toMessageString(e));
        }
    }
}

/** Delegating response handler which reports success/failure metrics, keyed on status class, for the operation. */
private class MeasuringResponseHandler implements ResponseHandler {

    private final ResponseHandler delegate;
    private final com.yahoo.documentapi.metrics.DocumentOperationType type;
    private final Instant start;

    private MeasuringResponseHandler(ResponseHandler delegate,
                                     com.yahoo.documentapi.metrics.DocumentOperationType type,
                                     Instant start) {
        this.delegate = delegate;
        this.type = type;
        this.start = start;
    }

    @Override
    public ContentChannel handleResponse(Response response) {
        switch (response.getStatus() / 100) {
            case 2: metrics.reportSuccessful(type, start); break;
            case 4: metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR); break;
            case 5: metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR); break;
        }
        return delegate.handleResponse(response);
    }

}

/** A content cluster, with the bucket space each of its document types belongs to. */
static class StorageCluster {

    private final String name;
    private final Map<String, String> documentBuckets; // document type → bucket space

    StorageCluster(String name, Map<String, String> documentBuckets) {
        this.name = requireNonNull(name);
        this.documentBuckets = Map.copyOf(documentBuckets);
    }

    String name() { return name; }
    String route() { return name(); }
    Optional<String> bucketOf(String documentType) { return Optional.ofNullable(documentBuckets.get(documentType)); }

}

/** Builds the cluster-name → StorageCluster map from the cluster list and bucket space configs. */
private static Map<String, StorageCluster> parseClusters(ClusterListConfig clusters, AllClustersBucketSpacesConfig buckets) {
    return clusters.storage().stream()
                   .collect(toUnmodifiableMap(storage -> storage.name(),
                                              storage -> new StorageCluster(storage.name(),
                                                                            buckets.cluster(storage.name())
                                                                                   .documentType().entrySet().stream()
                                                                                   .collect(toMap(entry -> entry.getKey(),
                                                                                                  entry -> entry.getValue().bucketSpace())))));
}

/**
 * Returns the wanted cluster, or the only cluster when none is specified and exactly one exists.
 *
 * @throws IllegalArgumentException if no clusters exist, the wanted cluster is unknown,
 *                                  or none is wanted and several exist
 */
static StorageCluster resolveCluster(Optional<String> wanted, Map<String, StorageCluster> clusters) {
    if (clusters.isEmpty())
        throw new IllegalArgumentException("Your Vespa deployment has no content clusters, so the document API is not enabled");

    return wanted.map(cluster -> {
        if ( ! clusters.containsKey(cluster))
            throw new IllegalArgumentException("Your Vespa deployment has no content cluster '" + cluster + "', only '" +
                                               String.join("', '", clusters.keySet()) + "'");

        return clusters.get(cluster);
    }).orElseGet(() -> {
        if (clusters.size() > 1)
            throw new IllegalArgumentException("Please specify one of the content clusters in your Vespa deployment: '" +
                                               String.join("', '", clusters.keySet()) + "'");

        return clusters.values().iterator().next();
    });
}

/**
 * Resolves the bucket space to visit: the one mapped to the document type when given,
 * otherwise the explicitly requested space (validated against the known ones),
 * otherwise the default space.
 */
static String resolveBucket(StorageCluster cluster, Optional<String> documentType,
                            List<String> bucketSpaces, Optional<String> bucketSpace) {
    return documentType.map(type -> cluster.bucketOf(type)
                                           .orElseThrow(() -> new IllegalArgumentException("Document type '" + type + "' in cluster '" + cluster.name() +
                                                                                           "' is not mapped to a known bucket space")))
                       .or(() -> bucketSpace.map(space -> {
                           if ( ! bucketSpaces.contains(space))
                               throw new IllegalArgumentException("Bucket space '" + space + "' is not a known bucket space; expected one of " +
                                                                  String.join(", ", bucketSpaces));

                           return space;
                       }))
                       .orElse(FixedBucketSpaces.defaultSpace());
}

/** The parsed /document/v1 request path: namespace, document type, optional group/number, and id rest. */
private static class DocumentPath {

    private final Path path;
    private final Optional<Group> group;

    DocumentPath(Path path) {
        this.path = requireNonNull(path);
        // A numeric "number" segment takes precedence over a "group" segment.
        this.group = Optional.ofNullable(path.get("number")).map(integerParser::parse).map(Group::of)
                             .or(() -> Optional.ofNullable(path.get("group")).map(Group::of));
    }

    DocumentId id() {
        return new DocumentId("id:" + requireNonNull(path.get("namespace")) +
                              ":" + requireNonNull(path.get("documentType")) +
                              ":" + group.map(Group::docIdPart).orElse("") +
                              ":" + requireNonNull(path.getRest()));
    }

    String rawPath() { return path.asString(); }
    Optional<String> documentType() { return Optional.ofNullable(path.get("documentType")); }
    Optional<String> namespace() { return Optional.ofNullable(path.get("namespace")); }
    Optional<Group> group() { return group; }

}

/** A document id group or number, with its document-id part and visitor-selection representations. */
static class Group {

    private final String value;
    private final String docIdPart;
    private final String selection;

    private Group(String value, String docIdPart, String selection) {
        Text.validateTextString(value)
            .ifPresent(codePoint -> { throw new IllegalArgumentException(String.format("Illegal code point U%04X in group", codePoint)); });
        this.value = value;
        this.docIdPart = docIdPart;
        this.selection = selection;
    }

    public static Group of(long value) { return new Group(Long.toString(value), "n=" + value, "id.user==" + value); }
    public static Group of(String value) { return new Group(value, "g=" + value, "id.group=='" + value.replaceAll("'", "\\\\'") + "'"); }

    public String value() { return value; }
    public String docIdPart() { return docIdPart; }
    public String selection() { return selection; }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Group group = (Group) o;
        return value.equals(group.value) &&
               docIdPart.equals(group.docIdPart) &&
               selection.equals(group.selection);
    }

    @Override
    public int hashCode() {
        return Objects.hash(value, docIdPart, selection);
    }

    @Override
    public String toString() {
        return "Group{" +
               "value='" + value + '\'' +
               ", docIdPart='" + docIdPart + '\'' +
               ", selection='" + selection + '\'' +
               '}';
    }

}

}
class DocumentV1ApiHandler extends AbstractRequestHandler { private static final Duration defaultTimeout = Duration.ofSeconds(175); private static final Logger log = Logger.getLogger(DocumentV1ApiHandler.class.getName()); private static final Parser<Integer> integerParser = Integer::parseInt; private static final Parser<Long> timeoutMillisParser = value -> ParameterParser.asMilliSeconds(value, defaultTimeout.toMillis()); private static final Parser<Boolean> booleanParser = Boolean::parseBoolean; private static final CompletionHandler logException = new CompletionHandler() { @Override public void completed() { } @Override public void failed(Throwable t) { log.log(FINE, "Exception writing or closing response data", t); } }; private static final ContentChannel ignoredContent = new ContentChannel() { @Override public void write(ByteBuffer buf, CompletionHandler handler) { handler.completed(); } @Override public void close(CompletionHandler handler) { handler.completed(); } }; private static final JsonFactory jsonFactory = new JsonFactory(); private static final String CREATE = "create"; private static final String CONDITION = "condition"; private static final String ROUTE = "route"; private static final String FIELD_SET = "fieldSet"; private static final String SELECTION = "selection"; private static final String CLUSTER = "cluster"; private static final String CONTINUATION = "continuation"; private static final String WANTED_DOCUMENT_COUNT = "wantedDocumentCount"; private static final String CONCURRENCY = "concurrency"; private static final String BUCKET_SPACE = "bucketSpace"; private static final String TIMEOUT = "timeout"; private static final String TRACELEVEL = "tracelevel"; private final Clock clock; private final Metric metric; private final DocumentApiMetrics metrics; private final DocumentOperationParser parser; private final long maxThrottled; private final DocumentAccess access; private final AsyncSession asyncSession; private final Map<String, 
StorageCluster> clusters; private final Deque<Operation> operations; private final AtomicLong enqueued = new AtomicLong(); private final Map<VisitorControlHandler, VisitorSession> visits = new ConcurrentHashMap<>(); private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("document-api-handler-")); private final Map<String, Map<Method, Handler>> handlers = defineApi(); @Inject public DocumentV1ApiHandler(Metric metric, MetricReceiver metricReceiver, VespaDocumentAccess documentAccess, DocumentmanagerConfig documentManagerConfig, ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig, DocumentOperationExecutorConfig executorConfig) { this(Clock.systemUTC(), metric, metricReceiver, documentAccess, documentManagerConfig, executorConfig, clusterListConfig, bucketSpacesConfig); } DocumentV1ApiHandler(Clock clock, Metric metric, MetricReceiver metricReceiver, DocumentAccess access, DocumentmanagerConfig documentmanagerConfig, DocumentOperationExecutorConfig executorConfig, ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig) { this.clock = clock; this.parser = new DocumentOperationParser(documentmanagerConfig); this.metric = metric; this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1"); this.maxThrottled = executorConfig.maxThrottled(); this.access = access; this.asyncSession = access.createAsyncSession(new AsyncParameters()); this.clusters = parseClusters(clusterListConfig, bucketSpacesConfig); this.operations = new ConcurrentLinkedDeque<>(); this.executor.scheduleWithFixedDelay(this::dispatchEnqueued, executorConfig.resendDelayMillis(), executorConfig.resendDelayMillis(), TimeUnit.MILLISECONDS); } @Override public ContentChannel handleRequest(Request rawRequest, ResponseHandler rawResponseHandler) { HandlerMetricContextUtil.onHandle(rawRequest, metric, getClass()); ResponseHandler responseHandler = response -> { 
HandlerMetricContextUtil.onHandled(rawRequest, metric, getClass()); return rawResponseHandler.handleResponse(response); }; HttpRequest request = (HttpRequest) rawRequest; try { request.setTimeout(getProperty(request, TIMEOUT, timeoutMillisParser) .orElse(defaultTimeout.toMillis()), TimeUnit.MILLISECONDS); Path requestPath = new Path(request.getUri()); for (String path : handlers.keySet()) if (requestPath.matches(path)) { Map<Method, Handler> methods = handlers.get(path); if (methods.containsKey(request.getMethod())) return methods.get(request.getMethod()).handle(request, new DocumentPath(requestPath), responseHandler); if (request.getMethod() == OPTIONS) options(methods.keySet(), responseHandler); methodNotAllowed(request, methods.keySet(), responseHandler); } notFound(request, handlers.keySet(), responseHandler); } catch (IllegalArgumentException e) { badRequest(request, e, responseHandler); } catch (RuntimeException e) { serverError(request, e, responseHandler); } return ignoredContent; } @Override public void handleTimeout(Request request, ResponseHandler responseHandler) { timeout((HttpRequest) request, "Request timeout after " + request.getTimeout(TimeUnit.MILLISECONDS) + "ms", responseHandler); } @Override public void destroy() { executor.shutdown(); Instant doom = clock.instant().plus(Duration.ofSeconds(20)); while ( ! operations.isEmpty() && clock.instant().isBefore(doom)) dispatchEnqueued(); if ( ! operations.isEmpty()) log.log(WARNING, "Failed to empty request queue before shutdown timeout — " + operations.size() + " requests left"); asyncSession.destroy(); visits.values().forEach(VisitorSession::destroy); try { if ( ! 
executor.awaitTermination(Duration.between(clock.instant(), doom).toMillis(), TimeUnit.MILLISECONDS)) executor.shutdownNow(); } catch (InterruptedException e) { log.log(WARNING, "Interrupted waiting for /document/v1 executor to shut down"); } } @FunctionalInterface interface Handler { ContentChannel handle(HttpRequest request, DocumentPath path, ResponseHandler handler); } /** Defines all paths/methods handled by this handler. */ private Map<String, Map<Method, Handler>> defineApi() { Map<String, Map<Method, Handler>> handlers = new LinkedHashMap<>(); handlers.put("/document/v1/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/docid/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/docid/{*}", Map.of(GET, this::getDocument, POST, this::postDocument, PUT, this::putDocument, DELETE, this::deleteDocument)); handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/{*}", Map.of(GET, this::getDocument, POST, this::postDocument, PUT, this::putDocument, DELETE, this::deleteDocument)); handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/{*}", Map.of(GET, this::getDocument, POST, this::postDocument, PUT, this::putDocument, DELETE, this::deleteDocument)); return Collections.unmodifiableMap(handlers); } private ContentChannel getDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) { enqueueAndDispatch(request, handler, () -> { VisitorParameters parameters = parseParameters(request, path); return () -> { visit(request, parameters, handler); return true; }; }); return ignoredContent; } private ContentChannel getDocument(HttpRequest request, DocumentPath path, ResponseHandler handler) { enqueueAndDispatch(request, handler, () -> { 
DocumentOperationParameters rawParameters = parametersFromRequest(request, CLUSTER, FIELD_SET); if (rawParameters.fieldSet().isEmpty()) rawParameters = rawParameters.withFieldSet(path.documentType().orElseThrow() + ":[document]"); DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> { handle(path, handler, response, (document, jsonResponse) -> { if (document != null) { jsonResponse.writeSingleDocument(document); jsonResponse.commit(Response.Status.OK); } else jsonResponse.commit(Response.Status.NOT_FOUND); }); }); return () -> dispatchOperation(() -> asyncSession.get(path.id(), parameters)); }); return ignoredContent; } private ContentChannel postDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) { ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.PUT, clock.instant()); return new ForwardingContentChannel(in -> { enqueueAndDispatch(request, handler, () -> { DocumentPut put = parser.parsePut(in, path.id().toString()); getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(put::setCondition); DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE) .withResponseHandler(response -> handle(path, handler, response)); return () -> dispatchOperation(() -> asyncSession.put(put, parameters)); }); }); } private ContentChannel putDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) { ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.UPDATE, clock.instant()); return new ForwardingContentChannel(in -> { enqueueAndDispatch(request, handler, () -> { DocumentUpdate update = parser.parseUpdate(in, path.id().toString()); getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(update::setCondition); getProperty(request, CREATE, booleanParser).ifPresent(update::setCreateIfNonExistent); DocumentOperationParameters 
parameters = parametersFromRequest(request, ROUTE) .withResponseHandler(response -> handle(path, handler, response)); return () -> dispatchOperation(() -> asyncSession.update(update, parameters)); }); }); } private ContentChannel deleteDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) { ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.REMOVE, clock.instant()); enqueueAndDispatch(request, handler, () -> { DocumentRemove remove = new DocumentRemove(path.id()); getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(remove::setCondition); DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE) .withResponseHandler(response -> handle(path, handler, response)); return () -> dispatchOperation(() -> asyncSession.remove(remove, parameters)); }); return ignoredContent; } /** Dispatches enqueued requests until one is blocked. */ void dispatchEnqueued() { try { while (dispatchFirst()); } catch (Exception e) { log.log(WARNING, "Uncaught exception in /document/v1 dispatch thread", e); } } /** Attempts to dispatch the first enqueued operations, and returns whether this was successful. */ private boolean dispatchFirst() { Operation operation = operations.poll(); if (operation == null) return false; if (operation.dispatch()) { enqueued.decrementAndGet(); return true; } operations.push(operation); return false; } /** * Enqueues the given request and operation, or responds with "overload" if the queue is full, * and then attempts to dispatch an enqueued operation from the head of the queue. 
*/ private void enqueueAndDispatch(HttpRequest request, ResponseHandler handler, Supplier<Supplier<Boolean>> operationParser) { if (enqueued.incrementAndGet() > maxThrottled) { enqueued.decrementAndGet(); overload(request, "Rejecting execution due to overload: " + maxThrottled + " requests already enqueued", handler); return; } operations.offer(new Operation(request, handler) { @Override Supplier<Boolean> parse() { return operationParser.get(); } }); dispatchFirst(); } /** Class for writing and returning JSON responses to document operations in a thread safe manner. */ private static class JsonResponse implements AutoCloseable { private final BufferedContentChannel buffer = new BufferedContentChannel(); private final OutputStream out = new ContentChannelOutputStream(buffer); private final JsonGenerator json = jsonFactory.createGenerator(out); private final ResponseHandler handler; private ContentChannel channel; private JsonResponse(ResponseHandler handler) throws IOException { this.handler = handler; json.writeStartObject(); } /** Creates a new JsonResponse with path and id fields written. */ static JsonResponse create(DocumentPath path, ResponseHandler handler) throws IOException { JsonResponse response = new JsonResponse(handler); response.writePathId(path.rawPath()); response.writeDocId(path.id()); return response; } /** Creates a new JsonResponse with path field written. */ static JsonResponse create(HttpRequest request, ResponseHandler handler) throws IOException { JsonResponse response = new JsonResponse(handler); response.writePathId(request.getUri().getRawPath()); return response; } /** Creates a new JsonResponse with path and message fields written. 
*/ static JsonResponse create(HttpRequest request, String message, ResponseHandler handler) throws IOException { JsonResponse response = new JsonResponse(handler); response.writePathId(request.getUri().getRawPath()); response.writeMessage(message); return response; } /** Commits a response with the given status code and some default headers, and writes whatever content is buffered. */ synchronized void commit(int status) throws IOException { Response response = new Response(status); response.headers().addAll(Map.of("Content-Type", List.of("application/json; charset=UTF-8"))); try { channel = handler.handleResponse(response); buffer.connectTo(channel); } catch (RuntimeException e) { throw new IOException(e); } } /** Commits a response with the given status code and some default headers, writes buffered content, and closes this. */ synchronized void respond(int status) throws IOException { try (this) { commit(status); } } /** Closes the JSON and the output content channel of this. */ @Override public synchronized void close() throws IOException { try { if (channel == null) { log.log(WARNING, "Close called before response was committed, in " + getClass().getName()); commit(Response.Status.INTERNAL_SERVER_ERROR); } json.close(); out.close(); } finally { if (channel != null) channel.close(logException); } } synchronized void writePathId(String path) throws IOException { json.writeStringField("pathId", path); } synchronized void writeMessage(String message) throws IOException { json.writeStringField("message", message); } synchronized void writeDocId(DocumentId id) throws IOException { json.writeStringField("id", id.toString()); } synchronized void writeTrace(Trace trace) throws IOException { if (trace != null && ! trace.getRoot().isEmpty()) { writeTrace(trace.getRoot()); } } private void writeTrace(TraceNode node) throws IOException { if (node.hasNote()) json.writeStringField("message", node.getNote()); if ( ! node.isLeaf()) { json.writeArrayFieldStart(node.isStrict() ? 
"trace" : "fork"); for (int i = 0; i < node.getNumChildren(); i++) { json.writeStartObject(); writeTrace(node.getChild(i)); json.writeEndObject(); } json.writeEndArray(); } } synchronized void writeSingleDocument(Document document) throws IOException { new JsonWriter(json).writeFields(document); } synchronized void writeDocumentsArrayStart() throws IOException { json.writeArrayFieldStart("documents"); } synchronized void writeDocumentValue(Document document) throws IOException { new JsonWriter(json).write(document); } synchronized void writeArrayEnd() throws IOException { json.writeEndArray(); } synchronized void writeContinuation(String token) throws IOException { json.writeStringField("continuation", token); } } private static void options(Collection<Method> methods, ResponseHandler handler) { loggingException(() -> { Response response = new Response(Response.Status.NO_CONTENT); response.headers().add("Allow", methods.stream().sorted().map(Method::name).collect(joining(","))); handler.handleResponse(response).close(logException); }); } private static void badRequest(HttpRequest request, IllegalArgumentException e, ResponseHandler handler) { loggingException(() -> { String message = Exceptions.toMessageString(e); log.log(FINE, () -> "Bad request for " + request.getMethod() + " at " + request.getUri().getRawPath() + ": " + message); JsonResponse.create(request, message, handler).respond(Response.Status.BAD_REQUEST); }); } private static void notFound(HttpRequest request, Collection<String> paths, ResponseHandler handler) { loggingException(() -> { JsonResponse.create(request, "Nothing at '" + request.getUri().getRawPath() + "'. 
" + "Available paths are:\n" + String.join("\n", paths), handler) .respond(Response.Status.NOT_FOUND); }); } private static void methodNotAllowed(HttpRequest request, Collection<Method> methods, ResponseHandler handler) { loggingException(() -> { JsonResponse.create(request, "'" + request.getMethod() + "' not allowed at '" + request.getUri().getRawPath() + "'. " + "Allowed methods are: " + methods.stream().sorted().map(Method::name).collect(joining(", ")), handler) .respond(Response.Status.METHOD_NOT_ALLOWED); }); } private static void overload(HttpRequest request, String message, ResponseHandler handler) { loggingException(() -> { log.log(FINE, () -> "Overload handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message); JsonResponse.create(request, message, handler).respond(Response.Status.TOO_MANY_REQUESTS); }); } private static void serverError(HttpRequest request, Throwable t, ResponseHandler handler) { loggingException(() -> { log.log(WARNING, "Uncaught exception handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ":", t); JsonResponse.create(request, Exceptions.toMessageString(t), handler).respond(Response.Status.INTERNAL_SERVER_ERROR); }); } private static void timeout(HttpRequest request, String message, ResponseHandler handler) { loggingException(() -> { log.log(FINE, () -> "Timeout handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message); JsonResponse.create(request, message, handler).respond(Response.Status.GATEWAY_TIMEOUT); }); } private static void loggingException(Exceptions.RunnableThrowingIOException runnable) { try { runnable.run(); } catch (Exception e) { log.log(FINE, "Failed writing response", e); } } private static abstract class Operation { private final Lock lock = new ReentrantLock(); private final HttpRequest request; private final ResponseHandler handler; private Supplier<Boolean> operation; Operation(HttpRequest request, 
ResponseHandler handler) { this.request = request; this.handler = handler; } /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ boolean dispatch() { if (request.isCancelled()) return true; if ( ! lock.tryLock()) throw new IllegalStateException("Concurrent attempts at dispatch — this is a bug"); try { if (operation == null) operation = parse(); return operation.get(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } finally { lock.unlock(); } return true; } abstract Supplier<Boolean> parse(); } /** Attempts to send the given document operation, returning false if thes needs to be retried. */ private static boolean dispatchOperation(Supplier<Result> documentOperation) { Result result = documentOperation.get(); if (result.type() == Result.ResultType.TRANSIENT_ERROR) return false; if (result.type() == Result.ResultType.FATAL_ERROR) throw new RuntimeException(result.getError()); return true; } /** Readable content channel which forwards data to a reader when closed. */ static class ForwardingContentChannel implements ContentChannel { private final ReadableContentChannel delegate = new ReadableContentChannel(); private final Consumer<InputStream> reader; public ForwardingContentChannel(Consumer<InputStream> reader) { this.reader = reader; } /** Write is complete when we have stored the buffer — call completion handler. */ @Override public void write(ByteBuffer buf, CompletionHandler handler) { try { delegate.write(buf, logException); handler.completed(); } catch (Exception e) { handler.failed(e); } } /** Close is complete when we have closed the buffer. 
*/ @Override public void close(CompletionHandler handler) { try { delegate.close(logException); reader.accept(new UnsafeContentInputStream(delegate)); handler.completed(); } catch (Exception e) { handler.failed(e); } } } static class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } DocumentPut parsePut(InputStream inputStream, String docId) { return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT); } DocumentUpdate parseUpdate(InputStream inputStream, String docId) { return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE); } private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } } interface SuccessCallback { void onSuccess(Document document, JsonResponse response) throws IOException; } private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response, SuccessCallback callback) { try (JsonResponse jsonResponse = JsonResponse.create(path, handler)) { jsonResponse.writeTrace(response.getTrace()); if (response.isSuccess()) callback.onSuccess((response instanceof DocumentResponse) ? 
((DocumentResponse) response).getDocument() : null, jsonResponse); else { jsonResponse.writeMessage(response.getTextMessage()); switch (response.outcome()) { case NOT_FOUND: jsonResponse.commit(Response.Status.NOT_FOUND); break; case CONDITION_FAILED: jsonResponse.commit(Response.Status.PRECONDITION_FAILED); break; case INSUFFICIENT_STORAGE: jsonResponse.commit(Response.Status.INSUFFICIENT_STORAGE); break; default: log.log(WARNING, "Unexpected document API operation outcome '" + response.outcome() + "'"); case ERROR: log.log(FINE, () -> "Exception performing document operation: " + response.getTextMessage()); jsonResponse.commit(Response.Status.INTERNAL_SERVER_ERROR); } } } catch (Exception e) { log.log(FINE, "Failed writing response", e); } } private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response) { handle(path, handler, response, (document, jsonResponse) -> jsonResponse.commit(Response.Status.OK)); } private VisitorParameters parseParameters(HttpRequest request, DocumentPath path) { int wantedDocumentCount = Math.min(1 << 10, getProperty(request, WANTED_DOCUMENT_COUNT, integerParser).orElse(1)); if (wantedDocumentCount <= 0) throw new IllegalArgumentException("wantedDocumentCount must be positive"); int concurrency = Math.min(100, getProperty(request, CONCURRENCY, integerParser).orElse(1)); if (concurrency <= 0) throw new IllegalArgumentException("concurrency must be positive"); Optional<String> cluster = getProperty(request, CLUSTER); if (cluster.isEmpty() && path.documentType().isEmpty()) throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level"); VisitorParameters parameters = new VisitorParameters(Stream.of(getProperty(request, SELECTION), path.documentType(), path.namespace().map(value -> "id.namespace=='" + value + "'"), path.group().map(Group::selection)) .flatMap(Optional::stream) .reduce(new StringJoiner(") and (", 
"(", ")").setEmptyValue(""), StringJoiner::add, StringJoiner::merge) .toString()); getProperty(request, CONTINUATION).map(ProgressToken::fromSerializedString).ifPresent(parameters::setResumeToken); parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME))); parameters.setMaxTotalHits(wantedDocumentCount); parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(concurrency)); parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - 5000)); parameters.visitInconsistentBuckets(true); parameters.setPriority(DocumentProtocol.Priority.NORMAL_4); StorageCluster storageCluster = resolveCluster(cluster, clusters); parameters.setRoute(storageCluster.route()); parameters.setBucketSpace(resolveBucket(storageCluster, path.documentType(), List.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()), getProperty(request, BUCKET_SPACE))); return parameters; } private void visit(HttpRequest request, VisitorParameters parameters, ResponseHandler handler) { try { JsonResponse response = JsonResponse.create(request, handler); response.writeDocumentsArrayStart(); CountDownLatch latch = new CountDownLatch(1); parameters.setLocalDataHandler(new DumpVisitorDataHandler() { @Override public void onDocument(Document doc, long timeStamp) { loggingException(() -> { response.writeDocumentValue(doc); }); } @Override public void onRemove(DocumentId id) { } }); parameters.setControlHandler(new VisitorControlHandler() { @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); loggingException(() -> { response.writeArrayEnd(); switch (code) { case TIMEOUT: if ( ! 
hasVisitedAnyBuckets()) { response.writeMessage("No buckets visited within timeout of " + parameters.getSessionTimeoutMs() + "ms (request timeout -5s)"); response.respond(Response.Status.GATEWAY_TIMEOUT); break; } case SUCCESS: case ABORTED: if (getProgress() != null && ! getProgress().isFinished()) response.writeContinuation(getProgress().serializeToString()); response.respond(Response.Status.OK); break; default: response.writeMessage(message != null ? message : "Visiting failed"); response.respond(Response.Status.INTERNAL_SERVER_ERROR); } executor.execute(() -> { try { latch.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } visits.remove(this).destroy(); }); }); } }); visits.put(parameters.getControlHandler(), access.createVisitorSession(parameters)); latch.countDown(); } catch (ParseException e) { badRequest(request, new IllegalArgumentException(e), handler); } catch (IOException e) { log.log(FINE, "Failed writing response", e); } } private static Optional<String> getProperty(HttpRequest request, String name) { if ( ! 
request.parameters().containsKey(name)) return Optional.empty(); List<String> values = request.parameters().get(name); String value; if (values == null || values.isEmpty() || (value = values.get(values.size() - 1)) == null || value.isEmpty()) throw new IllegalArgumentException("Expected non-empty value for request property '" + name + "'"); return Optional.of(value); } private static <T> Optional<T> getProperty(HttpRequest request, String name, Parser<T> parser) { return getProperty(request, name).map(parser::parse); } @FunctionalInterface interface Parser<T> extends Function<String, T> { default T parse(String value) { try { return apply(value); } catch (RuntimeException e) { throw new IllegalArgumentException("Failed parsing '" + value + "': " + Exceptions.toMessageString(e)); } } } private class MeasuringResponseHandler implements ResponseHandler { private final ResponseHandler delegate; private final com.yahoo.documentapi.metrics.DocumentOperationType type; private final Instant start; private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) { this.delegate = delegate; this.type = type; this.start = start; } @Override public ContentChannel handleResponse(Response response) { switch (response.getStatus() / 100) { case 2: metrics.reportSuccessful(type, start); break; case 4: metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR); break; case 5: metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR); break; } return delegate.handleResponse(response); } } static class StorageCluster { private final String name; private final Map<String, String> documentBuckets; StorageCluster(String name, Map<String, String> documentBuckets) { this.name = requireNonNull(name); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return name() + "-direct"; } Optional<String> bucketOf(String documentType) { return 
Optional.ofNullable(documentBuckets.get(documentType)); } } private static Map<String, StorageCluster> parseClusters(ClusterListConfig clusters, AllClustersBucketSpacesConfig buckets) { return clusters.storage().stream() .collect(toUnmodifiableMap(storage -> storage.name(), storage -> new StorageCluster(storage.name(), buckets.cluster(storage.name()) .documentType().entrySet().stream() .collect(toMap(entry -> entry.getKey(), entry -> entry.getValue().bucketSpace()))))); } static StorageCluster resolveCluster(Optional<String> wanted, Map<String, StorageCluster> clusters) { if (clusters.isEmpty()) throw new IllegalArgumentException("Your Vespa deployment has no content clusters, so the document API is not enabled"); return wanted.map(cluster -> { if ( ! clusters.containsKey(cluster)) throw new IllegalArgumentException("Your Vespa deployment has no content cluster '" + cluster + "', only '" + String.join("', '", clusters.keySet()) + "'"); return clusters.get(cluster); }).orElseGet(() -> { if (clusters.size() > 1) throw new IllegalArgumentException("Please specify one of the content clusters in your Vespa deployment: '" + String.join("', '", clusters.keySet()) + "'"); return clusters.values().iterator().next(); }); } static String resolveBucket(StorageCluster cluster, Optional<String> documentType, List<String> bucketSpaces, Optional<String> bucketSpace) { return documentType.map(type -> cluster.bucketOf(type) .orElseThrow(() -> new IllegalArgumentException("Document type '" + type + "' in cluster '" + cluster.name() + "' is not mapped to a known bucket space"))) .or(() -> bucketSpace.map(space -> { if ( ! 
bucketSpaces.contains(space)) throw new IllegalArgumentException("Bucket space '" + space + "' is not a known bucket space; expected one of " + String.join(", ", bucketSpaces)); return space; })) .orElse(FixedBucketSpaces.defaultSpace()); } private static class DocumentPath { private final Path path; private final Optional<Group> group; DocumentPath(Path path) { this.path = requireNonNull(path); this.group = Optional.ofNullable(path.get("number")).map(integerParser::parse).map(Group::of) .or(() -> Optional.ofNullable(path.get("group")).map(Group::of)); } DocumentId id() { return new DocumentId("id:" + requireNonNull(path.get("namespace")) + ":" + requireNonNull(path.get("documentType")) + ":" + group.map(Group::docIdPart).orElse("") + ":" + requireNonNull(path.getRest())); } String rawPath() { return path.asString(); } Optional<String> documentType() { return Optional.ofNullable(path.get("documentType")); } Optional<String> namespace() { return Optional.ofNullable(path.get("namespace")); } Optional<Group> group() { return group; } } static class Group { private final String value; private final String docIdPart; private final String selection; private Group(String value, String docIdPart, String selection) { Text.validateTextString(value) .ifPresent(codePoint -> { throw new IllegalArgumentException(String.format("Illegal code point U%04X in group", codePoint)); }); this.value = value; this.docIdPart = docIdPart; this.selection = selection; } public static Group of(long value) { return new Group(Long.toString(value), "n=" + value, "id.user==" + value); } public static Group of(String value) { return new Group(value, "g=" + value, "id.group=='" + value.replaceAll("'", "\\\\'") + "'"); } public String value() { return value; } public String docIdPart() { return docIdPart; } public String selection() { return selection; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Group group = 
(Group) o; return value.equals(group.value) && docIdPart.equals(group.docIdPart) && selection.equals(group.selection); } @Override public int hashCode() { return Objects.hash(value, docIdPart, selection); } @Override public String toString() { return "Group{" + "value='" + value + '\'' + ", docIdPart='" + docIdPart + '\'' + ", selection='" + selection + '\'' + '}'; } } }
return this or throw?
public ACLCreateModeStatBackgroundPathAndBytesable<String> withProtection() { return null; }
return null;
public ACLCreateModeStatBackgroundPathAndBytesable<String> withProtection() { throw new UnsupportedOperationException("Not implemented in MockCurator"); }
class MockCreateBuilder implements CreateBuilder { private boolean createParents = false; private CreateMode createMode = CreateMode.PERSISTENT; @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentsIfNeeded() { createParents = true; return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentContainersIfNeeded() { return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override @Deprecated public ACLPathAndBytesable<String> withProtectedEphemeralSequential() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public ErrorListenerPathAndBytesable<String> inBackground() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> 
inBackground(BackgroundCallback backgroundCallback) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public CreateBuilderMain withTtl(long l) { return null; } @Override public CreateBuilder2 orSetData() { return null; } @Override public CreateBuilder2 orSetData(int i) { return null; } @Override public CreateBackgroundModeStatACLable compressed() { return null; } @Override public CreateProtectACLCreateModePathAndBytesable<String> storingStatIn(Stat stat) { return null; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list) { return null; } @Override public ACLBackgroundPathAndBytesable<String> withMode(CreateMode createMode) { this.createMode = createMode; return this; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list, boolean b) { return null; } }
class MockCreateBuilder implements CreateBuilder { private boolean createParents = false; private CreateMode createMode = CreateMode.PERSISTENT; @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentsIfNeeded() { createParents = true; return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentContainersIfNeeded() { return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override @Deprecated public ACLPathAndBytesable<String> withProtectedEphemeralSequential() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public ErrorListenerPathAndBytesable<String> inBackground() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> 
inBackground(BackgroundCallback backgroundCallback) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public CreateBuilderMain withTtl(long l) { return this; } @Override public CreateBuilder2 orSetData() { return null; } @Override public CreateBuilder2 orSetData(int i) { return null; } @Override public CreateBackgroundModeStatACLable compressed() { return null; } @Override public CreateProtectACLCreateModePathAndBytesable<String> storingStatIn(Stat stat) { return null; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list) { return this; } @Override public ACLBackgroundPathAndBytesable<String> withMode(CreateMode createMode) { this.createMode = createMode; return this; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list, boolean b) { return this; } }
These should throw or return this?
public CreateBuilderMain withTtl(long l) { return null; }
return null;
public CreateBuilderMain withTtl(long l) { return this; }
class MockCreateBuilder implements CreateBuilder { private boolean createParents = false; private CreateMode createMode = CreateMode.PERSISTENT; @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentsIfNeeded() { createParents = true; return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentContainersIfNeeded() { return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override @Deprecated public ACLPathAndBytesable<String> withProtectedEphemeralSequential() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ACLCreateModeStatBackgroundPathAndBytesable<String> withProtection() { return null; } public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public ErrorListenerPathAndBytesable<String> inBackground() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(Object o) { throw new UnsupportedOperationException("Not implemented in 
MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override @Override public CreateBuilder2 orSetData() { return null; } @Override public CreateBuilder2 orSetData(int i) { return null; } @Override public CreateBackgroundModeStatACLable compressed() { return null; } @Override public CreateProtectACLCreateModePathAndBytesable<String> storingStatIn(Stat stat) { return null; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list) { return null; } @Override public ACLBackgroundPathAndBytesable<String> withMode(CreateMode createMode) { this.createMode = createMode; return this; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list, boolean b) { return null; } }
class MockCreateBuilder implements CreateBuilder { private boolean createParents = false; private CreateMode createMode = CreateMode.PERSISTENT; @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentsIfNeeded() { createParents = true; return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentContainersIfNeeded() { return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override @Deprecated public ACLPathAndBytesable<String> withProtectedEphemeralSequential() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ACLCreateModeStatBackgroundPathAndBytesable<String> withProtection() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public ErrorListenerPathAndBytesable<String> inBackground() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(Object o) { 
throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override @Override public CreateBuilder2 orSetData() { return null; } @Override public CreateBuilder2 orSetData(int i) { return null; } @Override public CreateBackgroundModeStatACLable compressed() { return null; } @Override public CreateProtectACLCreateModePathAndBytesable<String> storingStatIn(Stat stat) { return null; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list) { return this; } @Override public ACLBackgroundPathAndBytesable<String> withMode(CreateMode createMode) { this.createMode = createMode; return this; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list, boolean b) { return this; } }
Fixed some of these
public CreateBuilderMain withTtl(long l) { return null; }
return null;
public CreateBuilderMain withTtl(long l) { return this; }
class MockCreateBuilder implements CreateBuilder { private boolean createParents = false; private CreateMode createMode = CreateMode.PERSISTENT; @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentsIfNeeded() { createParents = true; return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentContainersIfNeeded() { return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override @Deprecated public ACLPathAndBytesable<String> withProtectedEphemeralSequential() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ACLCreateModeStatBackgroundPathAndBytesable<String> withProtection() { return null; } public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public ErrorListenerPathAndBytesable<String> inBackground() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(Object o) { throw new UnsupportedOperationException("Not implemented in 
MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override @Override public CreateBuilder2 orSetData() { return null; } @Override public CreateBuilder2 orSetData(int i) { return null; } @Override public CreateBackgroundModeStatACLable compressed() { return null; } @Override public CreateProtectACLCreateModePathAndBytesable<String> storingStatIn(Stat stat) { return null; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list) { return null; } @Override public ACLBackgroundPathAndBytesable<String> withMode(CreateMode createMode) { this.createMode = createMode; return this; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list, boolean b) { return null; } }
class MockCreateBuilder implements CreateBuilder { private boolean createParents = false; private CreateMode createMode = CreateMode.PERSISTENT; @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentsIfNeeded() { createParents = true; return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentContainersIfNeeded() { return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override @Deprecated public ACLPathAndBytesable<String> withProtectedEphemeralSequential() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ACLCreateModeStatBackgroundPathAndBytesable<String> withProtection() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public ErrorListenerPathAndBytesable<String> inBackground() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(Object o) { 
throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override @Override public CreateBuilder2 orSetData() { return null; } @Override public CreateBuilder2 orSetData(int i) { return null; } @Override public CreateBackgroundModeStatACLable compressed() { return null; } @Override public CreateProtectACLCreateModePathAndBytesable<String> storingStatIn(Stat stat) { return null; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list) { return this; } @Override public ACLBackgroundPathAndBytesable<String> withMode(CreateMode createMode) { this.createMode = createMode; return this; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list, boolean b) { return this; } }
Fixed
public ACLCreateModeStatBackgroundPathAndBytesable<String> withProtection() { return null; }
return null;
public ACLCreateModeStatBackgroundPathAndBytesable<String> withProtection() { throw new UnsupportedOperationException("Not implemented in MockCurator"); }
class MockCreateBuilder implements CreateBuilder { private boolean createParents = false; private CreateMode createMode = CreateMode.PERSISTENT; @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentsIfNeeded() { createParents = true; return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentContainersIfNeeded() { return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override @Deprecated public ACLPathAndBytesable<String> withProtectedEphemeralSequential() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public ErrorListenerPathAndBytesable<String> inBackground() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> 
inBackground(BackgroundCallback backgroundCallback) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public CreateBuilderMain withTtl(long l) { return null; } @Override public CreateBuilder2 orSetData() { return null; } @Override public CreateBuilder2 orSetData(int i) { return null; } @Override public CreateBackgroundModeStatACLable compressed() { return null; } @Override public CreateProtectACLCreateModePathAndBytesable<String> storingStatIn(Stat stat) { return null; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list) { return null; } @Override public ACLBackgroundPathAndBytesable<String> withMode(CreateMode createMode) { this.createMode = createMode; return this; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list, boolean b) { return null; } }
class MockCreateBuilder implements CreateBuilder { private boolean createParents = false; private CreateMode createMode = CreateMode.PERSISTENT; @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentsIfNeeded() { createParents = true; return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override public ProtectACLCreateModeStatPathAndBytesable<String> creatingParentContainersIfNeeded() { return new MockProtectACLCreateModeStatPathAndBytesable<>() { @Override public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } }; } @Override @Deprecated public ACLPathAndBytesable<String> withProtectedEphemeralSequential() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public String forPath(String s) throws Exception { return createNode(s, new byte[0], createParents, createMode, fileSystem.root(), listeners); } public String forPath(String s, byte[] bytes) throws Exception { return createNode(s, bytes, createParents, createMode, fileSystem.root(), listeners); } @Override public ErrorListenerPathAndBytesable<String> inBackground() { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> 
inBackground(BackgroundCallback backgroundCallback) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public ErrorListenerPathAndBytesable<String> inBackground(BackgroundCallback backgroundCallback, Object o, Executor executor) { throw new UnsupportedOperationException("Not implemented in MockCurator"); } @Override public CreateBuilderMain withTtl(long l) { return this; } @Override public CreateBuilder2 orSetData() { return null; } @Override public CreateBuilder2 orSetData(int i) { return null; } @Override public CreateBackgroundModeStatACLable compressed() { return null; } @Override public CreateProtectACLCreateModePathAndBytesable<String> storingStatIn(Stat stat) { return null; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list) { return this; } @Override public ACLBackgroundPathAndBytesable<String> withMode(CreateMode createMode) { this.createMode = createMode; return this; } @Override public BackgroundPathAndBytesable<String> withACL(List<ACL> list, boolean b) { return this; } }
will this be a no-op if the container is already stopped?
public boolean retainManagedContainers(Set<ContainerName> containerNames) { return containerEngine.listManagedContainers(MANAGER_NAME).stream() .filter(containerName -> ! containerNames.contains(containerName)) .peek(containerName -> { containerEngine.stopContainer(containerName); containerEngine.deleteContainer(containerName); }).count() > 0; }
containerEngine.stopContainer(containerName);
public boolean retainManagedContainers(Set<ContainerName> containerNames) { return containerEngine.listManagedContainers(MANAGER_NAME).stream() .filter(containerName -> ! containerNames.contains(containerName)) .peek(containerName -> { containerEngine.stopContainer(containerName); containerEngine.deleteContainer(containerName); }).count() > 0; }
class ContainerOperationsImpl implements ContainerOperations { private static final Logger logger = Logger.getLogger(ContainerOperationsImpl.class.getName()); static final String MANAGER_NAME = "node-admin"; private static final InetAddress IPV6_NPT_PREFIX = InetAddresses.forString("fd00::"); private static final InetAddress IPV4_NPT_PREFIX = InetAddresses.forString("172.17.0.0"); private static final String ETC_MACHINE_ID = "/etc/machine-id"; private static final Random random = new Random(System.nanoTime()); private final ContainerEngine containerEngine; private final Terminal terminal; private final IPAddresses ipAddresses; private final FileSystem fileSystem; public ContainerOperationsImpl(ContainerEngine containerEngine, Terminal terminal, IPAddresses ipAddresses, FileSystem fileSystem) { this.containerEngine = containerEngine; this.terminal = terminal; this.ipAddresses = ipAddresses; this.fileSystem = fileSystem; } @Override public void createContainer(NodeAgentContext context, ContainerData containerData, ContainerResources containerResources) { context.log(logger, "Creating container"); ContainerEngine.CreateContainerCommand command = containerEngine.createContainerCommand( context.node().wantedDockerImage().get(), context.containerName()) .withHostName(context.node().hostname()) .withResources(containerResources) .withManagedBy(MANAGER_NAME) .withDnsOption("inet6") .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 409_600, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN") .withAddCapability("SYS_NICE"); if (context.nodeType() != NodeType.proxy && context.nodeType() != NodeType.controller) command.withSecurityOpt("no-new-privileges"); if (context.node().membership().map(m -> m.type().isContent()).orElse(false)) command.withSecurityOpt("seccomp=unconfined"); DockerNetworking networking = context.dockerNetworking(); command.withNetworkMode(networking.getDockerNetworkMode()); if (networking == 
DockerNetworking.NPT) { Optional<? extends InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname()); Optional<? extends InetAddress> ipV6Local = ipAddresses.getIPv6Address(context.node().hostname()); assertEqualIpAddresses(context.hostname(), ipV4Local, context.node().ipAddresses(), IPVersion.IPv4); assertEqualIpAddresses(context.hostname(), ipV6Local, context.node().ipAddresses(), IPVersion.IPv6); if (ipV4Local.isEmpty() && ipV6Local.isEmpty()) { throw new ConvergenceException("Container " + context.node().hostname() + " with " + networking + " networking must have at least 1 IP address, but found none"); } ipV6Local = ipV6Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV6_NPT_PREFIX, 8)); ipV6Local.ifPresent(command::withIpAddress); ipV4Local = ipV4Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV4_NPT_PREFIX, 2)); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local); } else if (networking == DockerNetworking.LOCAL) { var ipv4Address = ipAddresses.getIPv4Address(context.node().hostname()) .orElseThrow(() -> new IllegalArgumentException("No IPv4 address could be resolved from '" + context.hostname()+ "'")); command.withIpAddress(ipv4Address); } UnixPath machineIdPath = new UnixPath(context.pathOnHostFromPathInNode(ETC_MACHINE_ID)); if (!machineIdPath.exists()) { String machineId = String.format("%16x%16x\n", random.nextLong(), random.nextLong()); machineIdPath.createParents().writeUtf8File(machineId); context.log(logger, "Wrote " + machineId + " to " + machineIdPath); } addMounts(context, command); logger.info("Creating new container with args: " + command); command.create(); } private static void assertEqualIpAddresses(HostName hostName, Optional<? 
extends InetAddress> resolvedAddress, Set<String> nrAddresses, IPVersion ipVersion) { Optional<InetAddress> nrAddress = nrAddresses.stream() .map(InetAddresses::forString) .filter(ipVersion::match) .findFirst(); if (resolvedAddress.equals(nrAddress)) return; throw new ConvergenceException(String.format( "IP address (%s) resolved from %s does not match IP address (%s) in node-repo", resolvedAddress.map(InetAddresses::toAddrString).orElse("[none]"), hostName, nrAddress.map(InetAddresses::toAddrString).orElse("[none]"))); } void addEtcHosts(ContainerData containerData, String hostname, Optional<? extends InetAddress> ipV4Local, Optional<? extends InetAddress> ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n"); ipV6Local.ifPresent(ipv6 -> etcHosts.append(ipv6.getHostAddress()).append('\t').append(hostname).append('\n')); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress()).append('\t').append(hostname).append('\n')); containerData.addFile(fileSystem.getPath("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); containerEngine.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); containerEngine.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); containerEngine.deleteContainer(context.containerName()); } @Override public void updateContainer(NodeAgentContext context, ContainerResources containerResources) { containerEngine.updateContainer(context.containerName(), containerResources); } @Override public Optional<Container> getContainer(NodeAgentContext context) { return 
containerEngine.getContainer(context.containerName()); } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage, RegistryCredentials registryCredentials) { return containerEngine.pullImageAsyncIfNeeded(dockerImage, registryCredentials); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command) { int containerPid = containerEngine.getContainer(context.containerName()) .filter(container -> container.state.isRunning()) .orElseThrow(() -> new RuntimeException( "Found no running container named " + context.containerName().asString())) .pid; return terminal.newCommandLine(context) .add("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--") .add(command) .executeSilently(); } @Override public void resumeNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "resume"); } @Override public void suspendNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "suspend"); } @Override public void restartVespa(NodeAgentContext context) { executeNodeCtlInContainer(context, "restart-vespa"); } @Override public void startServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "start"); } @Override public void stopServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "stop"); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] {context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; 
ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return containerEngine.getContainerStats(context.containerName()); } private void addMounts(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { var volumes = new VolumeHelper(context, command); volumes.addPrivateVolumes( ETC_MACHINE_ID, "/etc/vespa/flags", "/etc/yamas-agent", "/opt/splunkforwarder/var/log", "/var/log", "/var/log/journal", "/var/spool/postfix/maildrop", "logs/vespa", "logs/ysar", "tmp", "var/crash", "var/container-data", "var/db/vespa", "var/jdisc_container", "var/vespa", "var/yca", "var/zookeeper"); if (context.nodeType() == NodeType.proxy) { volumes.addPrivateVolumes("logs/nginx", "var/vespa-hosted/routing"); } else if (context.nodeType() == NodeType.tenant) volumes.addPrivateVolumes("/var/lib/sia"); if (isInfrastructureHost(context.nodeType())) volumes.addSharedVolumeMap("/var/lib/sia", "/var/lib/sia"); boolean isMain = context.zone().getSystemName() == SystemName.cd || context.zone().getSystemName() == SystemName.main; if (isMain && context.nodeType() == NodeType.tenant) volumes.addSharedVolumeMap("/var/zpe", "var/zpe"); } @Override public boolean noManagedContainersRunning() { return containerEngine.noManagedContainersRunning(MANAGER_NAME); } @Override @Override public boolean deleteUnusedContainerImages(List<DockerImage> excludes, Duration minImageAgeToDelete) { return containerEngine.deleteUnusedDockerImages(excludes, minImageAgeToDelete); } /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.config || nodeType == NodeType.proxy || nodeType == 
NodeType.controller; } private static class VolumeHelper { private final NodeAgentContext context; private final ContainerEngine.CreateContainerCommand command; public VolumeHelper(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { this.context = context; this.command = command; } /** * Resolve each path to an absolute relative the container's vespa home directory. * Mounts the resulting path, under the container's storage directory as path in the container. */ public void addPrivateVolumes(String... pathsInNode) { Stream.of(pathsInNode).forEach(pathString -> { Path absolutePathInNode = resolveNodePath(pathString); Path pathOnHost = context.pathOnHostFromPathInNode(absolutePathInNode); command.withVolume(pathOnHost, absolutePathInNode); }); } /** * Mounts pathOnHost on the host as pathInNode in the container. Use for paths that * might be shared with other containers. */ public void addSharedVolumeMap(String pathOnHost, String pathInNode) { command.withSharedVolume(resolveNodePath(pathOnHost), resolveNodePath(pathInNode)); } private Path resolveNodePath(String pathString) { Path path = context.fileSystem().getPath(pathString); return path.isAbsolute() ? path : context.pathInNodeUnderVespaHome(path); } } }
class ContainerOperationsImpl implements ContainerOperations { private static final Logger logger = Logger.getLogger(ContainerOperationsImpl.class.getName()); static final String MANAGER_NAME = "node-admin"; private static final InetAddress IPV6_NPT_PREFIX = InetAddresses.forString("fd00::"); private static final InetAddress IPV4_NPT_PREFIX = InetAddresses.forString("172.17.0.0"); private static final String ETC_MACHINE_ID = "/etc/machine-id"; private static final Random random = new Random(System.nanoTime()); private final ContainerEngine containerEngine; private final Terminal terminal; private final IPAddresses ipAddresses; private final FileSystem fileSystem; public ContainerOperationsImpl(ContainerEngine containerEngine, Terminal terminal, IPAddresses ipAddresses, FileSystem fileSystem) { this.containerEngine = containerEngine; this.terminal = terminal; this.ipAddresses = ipAddresses; this.fileSystem = fileSystem; } @Override public void createContainer(NodeAgentContext context, ContainerData containerData, ContainerResources containerResources) { context.log(logger, "Creating container"); ContainerEngine.CreateContainerCommand command = containerEngine.createContainerCommand( context.node().wantedDockerImage().get(), context.containerName()) .withHostName(context.node().hostname()) .withResources(containerResources) .withManagedBy(MANAGER_NAME) .withDnsOption("inet6") .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 409_600, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN") .withAddCapability("SYS_NICE"); if (context.nodeType() != NodeType.proxy && context.nodeType() != NodeType.controller) command.withSecurityOpt("no-new-privileges"); if (context.node().membership().map(m -> m.type().isContent()).orElse(false)) command.withSecurityOpt("seccomp=unconfined"); DockerNetworking networking = context.dockerNetworking(); command.withNetworkMode(networking.getDockerNetworkMode()); if (networking == 
DockerNetworking.NPT) { Optional<? extends InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname()); Optional<? extends InetAddress> ipV6Local = ipAddresses.getIPv6Address(context.node().hostname()); assertEqualIpAddresses(context.hostname(), ipV4Local, context.node().ipAddresses(), IPVersion.IPv4); assertEqualIpAddresses(context.hostname(), ipV6Local, context.node().ipAddresses(), IPVersion.IPv6); if (ipV4Local.isEmpty() && ipV6Local.isEmpty()) { throw new ConvergenceException("Container " + context.node().hostname() + " with " + networking + " networking must have at least 1 IP address, but found none"); } ipV6Local = ipV6Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV6_NPT_PREFIX, 8)); ipV6Local.ifPresent(command::withIpAddress); ipV4Local = ipV4Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV4_NPT_PREFIX, 2)); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local); } else if (networking == DockerNetworking.LOCAL) { var ipv4Address = ipAddresses.getIPv4Address(context.node().hostname()) .orElseThrow(() -> new IllegalArgumentException("No IPv4 address could be resolved from '" + context.hostname()+ "'")); command.withIpAddress(ipv4Address); } UnixPath machineIdPath = new UnixPath(context.pathOnHostFromPathInNode(ETC_MACHINE_ID)); if (!machineIdPath.exists()) { String machineId = String.format("%16x%16x\n", random.nextLong(), random.nextLong()); machineIdPath.createParents().writeUtf8File(machineId); context.log(logger, "Wrote " + machineId + " to " + machineIdPath); } addMounts(context, command); logger.info("Creating new container with args: " + command); command.create(); } private static void assertEqualIpAddresses(HostName hostName, Optional<? 
extends InetAddress> resolvedAddress, Set<String> nrAddresses, IPVersion ipVersion) { Optional<InetAddress> nrAddress = nrAddresses.stream() .map(InetAddresses::forString) .filter(ipVersion::match) .findFirst(); if (resolvedAddress.equals(nrAddress)) return; throw new ConvergenceException(String.format( "IP address (%s) resolved from %s does not match IP address (%s) in node-repo", resolvedAddress.map(InetAddresses::toAddrString).orElse("[none]"), hostName, nrAddress.map(InetAddresses::toAddrString).orElse("[none]"))); } void addEtcHosts(ContainerData containerData, String hostname, Optional<? extends InetAddress> ipV4Local, Optional<? extends InetAddress> ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n"); ipV6Local.ifPresent(ipv6 -> etcHosts.append(ipv6.getHostAddress()).append('\t').append(hostname).append('\n')); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress()).append('\t').append(hostname).append('\n')); containerData.addFile(fileSystem.getPath("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); containerEngine.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); containerEngine.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); containerEngine.deleteContainer(context.containerName()); } @Override public void updateContainer(NodeAgentContext context, ContainerResources containerResources) { containerEngine.updateContainer(context.containerName(), containerResources); } @Override public Optional<Container> getContainer(NodeAgentContext context) { return 
containerEngine.getContainer(context.containerName()); } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage, RegistryCredentials registryCredentials) { return containerEngine.pullImageAsyncIfNeeded(dockerImage, registryCredentials); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command) { int containerPid = containerEngine.getContainer(context.containerName()) .filter(container -> container.state.isRunning()) .orElseThrow(() -> new RuntimeException( "Found no running container named " + context.containerName().asString())) .pid; return terminal.newCommandLine(context) .add("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--") .add(command) .executeSilently(); } @Override public void resumeNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "resume"); } @Override public void suspendNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "suspend"); } @Override public void restartVespa(NodeAgentContext context) { executeNodeCtlInContainer(context, "restart-vespa"); } @Override public void startServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "start"); } @Override public void stopServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "stop"); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] {context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; 
ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return containerEngine.getContainerStats(context.containerName()); } private void addMounts(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { var volumes = new VolumeHelper(context, command); volumes.addPrivateVolumes( ETC_MACHINE_ID, "/etc/vespa/flags", "/etc/yamas-agent", "/opt/splunkforwarder/var/log", "/var/log", "/var/log/journal", "/var/spool/postfix/maildrop", "logs/vespa", "logs/ysar", "tmp", "var/crash", "var/container-data", "var/db/vespa", "var/jdisc_container", "var/vespa", "var/yca", "var/zookeeper"); if (context.nodeType() == NodeType.proxy) { volumes.addPrivateVolumes("logs/nginx", "var/vespa-hosted/routing"); } else if (context.nodeType() == NodeType.tenant) volumes.addPrivateVolumes("/var/lib/sia"); if (isInfrastructureHost(context.nodeType())) volumes.addSharedVolumeMap("/var/lib/sia", "/var/lib/sia"); boolean isMain = context.zone().getSystemName() == SystemName.cd || context.zone().getSystemName() == SystemName.main; if (isMain && context.nodeType() == NodeType.tenant) volumes.addSharedVolumeMap("/var/zpe", "var/zpe"); } @Override public boolean noManagedContainersRunning() { return containerEngine.noManagedContainersRunning(MANAGER_NAME); } @Override @Override public boolean deleteUnusedContainerImages(List<DockerImage> excludes, Duration minImageAgeToDelete) { return containerEngine.deleteUnusedDockerImages(excludes, minImageAgeToDelete); } /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.config || nodeType == NodeType.proxy || nodeType == 
NodeType.controller; } private static class VolumeHelper { private final NodeAgentContext context; private final ContainerEngine.CreateContainerCommand command; public VolumeHelper(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { this.context = context; this.command = command; } /** * Resolve each path to an absolute relative the container's vespa home directory. * Mounts the resulting path, under the container's storage directory as path in the container. */ public void addPrivateVolumes(String... pathsInNode) { Stream.of(pathsInNode).forEach(pathString -> { Path absolutePathInNode = resolveNodePath(pathString); Path pathOnHost = context.pathOnHostFromPathInNode(absolutePathInNode); command.withVolume(pathOnHost, absolutePathInNode); }); } /** * Mounts pathOnHost on the host as pathInNode in the container. Use for paths that * might be shared with other containers. */ public void addSharedVolumeMap(String pathOnHost, String pathInNode) { command.withSharedVolume(resolveNodePath(pathOnHost), resolveNodePath(pathInNode)); } private Path resolveNodePath(String pathString) { Path path = context.fileSystem().getPath(pathString); return path.isAbsolute() ? path : context.pathInNodeUnderVespaHome(path); } } }
Yes; https://github.com/vespa-engine/vespa/blob/130ac9c430496b5e31098818bff19b869d6c9f44/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerEngine.java#L231
/**
 * Stops and deletes every container managed by this node-admin whose name is NOT in the given set.
 *
 * <p>The previous implementation performed the stop/delete inside {@code Stream.peek}, which the
 * Stream API documents as intended mainly for debugging — its action is not guaranteed to run for
 * every element in all pipelines. A plain loop makes the side effects explicit and unconditional.
 *
 * @param containerNames names of the managed containers to keep
 * @return true if at least one container was stopped and deleted
 */
public boolean retainManagedContainers(Set<ContainerName> containerNames) {
    boolean removedAny = false;
    for (ContainerName containerName : containerEngine.listManagedContainers(MANAGER_NAME)) {
        if (containerNames.contains(containerName)) continue; // container should be retained

        containerEngine.stopContainer(containerName);
        containerEngine.deleteContainer(containerName);
        removedAny = true;
    }
    return removedAny;
}
containerEngine.stopContainer(containerName);
/**
 * Stops and deletes every container managed by this node-admin whose name is NOT in the given set.
 *
 * <p>The previous implementation performed the stop/delete inside {@code Stream.peek}, which the
 * Stream API documents as intended mainly for debugging — its action is not guaranteed to run for
 * every element in all pipelines. A plain loop makes the side effects explicit and unconditional.
 *
 * @param containerNames names of the managed containers to keep
 * @return true if at least one container was stopped and deleted
 */
public boolean retainManagedContainers(Set<ContainerName> containerNames) {
    boolean removedAny = false;
    for (ContainerName containerName : containerEngine.listManagedContainers(MANAGER_NAME)) {
        if (containerNames.contains(containerName)) continue; // container should be retained

        containerEngine.stopContainer(containerName);
        containerEngine.deleteContainer(containerName);
        removedAny = true;
    }
    return removedAny;
}
class ContainerOperationsImpl implements ContainerOperations { private static final Logger logger = Logger.getLogger(ContainerOperationsImpl.class.getName()); static final String MANAGER_NAME = "node-admin"; private static final InetAddress IPV6_NPT_PREFIX = InetAddresses.forString("fd00::"); private static final InetAddress IPV4_NPT_PREFIX = InetAddresses.forString("172.17.0.0"); private static final String ETC_MACHINE_ID = "/etc/machine-id"; private static final Random random = new Random(System.nanoTime()); private final ContainerEngine containerEngine; private final Terminal terminal; private final IPAddresses ipAddresses; private final FileSystem fileSystem; public ContainerOperationsImpl(ContainerEngine containerEngine, Terminal terminal, IPAddresses ipAddresses, FileSystem fileSystem) { this.containerEngine = containerEngine; this.terminal = terminal; this.ipAddresses = ipAddresses; this.fileSystem = fileSystem; } @Override public void createContainer(NodeAgentContext context, ContainerData containerData, ContainerResources containerResources) { context.log(logger, "Creating container"); ContainerEngine.CreateContainerCommand command = containerEngine.createContainerCommand( context.node().wantedDockerImage().get(), context.containerName()) .withHostName(context.node().hostname()) .withResources(containerResources) .withManagedBy(MANAGER_NAME) .withDnsOption("inet6") .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 409_600, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN") .withAddCapability("SYS_NICE"); if (context.nodeType() != NodeType.proxy && context.nodeType() != NodeType.controller) command.withSecurityOpt("no-new-privileges"); if (context.node().membership().map(m -> m.type().isContent()).orElse(false)) command.withSecurityOpt("seccomp=unconfined"); DockerNetworking networking = context.dockerNetworking(); command.withNetworkMode(networking.getDockerNetworkMode()); if (networking == 
DockerNetworking.NPT) { Optional<? extends InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname()); Optional<? extends InetAddress> ipV6Local = ipAddresses.getIPv6Address(context.node().hostname()); assertEqualIpAddresses(context.hostname(), ipV4Local, context.node().ipAddresses(), IPVersion.IPv4); assertEqualIpAddresses(context.hostname(), ipV6Local, context.node().ipAddresses(), IPVersion.IPv6); if (ipV4Local.isEmpty() && ipV6Local.isEmpty()) { throw new ConvergenceException("Container " + context.node().hostname() + " with " + networking + " networking must have at least 1 IP address, but found none"); } ipV6Local = ipV6Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV6_NPT_PREFIX, 8)); ipV6Local.ifPresent(command::withIpAddress); ipV4Local = ipV4Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV4_NPT_PREFIX, 2)); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local); } else if (networking == DockerNetworking.LOCAL) { var ipv4Address = ipAddresses.getIPv4Address(context.node().hostname()) .orElseThrow(() -> new IllegalArgumentException("No IPv4 address could be resolved from '" + context.hostname()+ "'")); command.withIpAddress(ipv4Address); } UnixPath machineIdPath = new UnixPath(context.pathOnHostFromPathInNode(ETC_MACHINE_ID)); if (!machineIdPath.exists()) { String machineId = String.format("%16x%16x\n", random.nextLong(), random.nextLong()); machineIdPath.createParents().writeUtf8File(machineId); context.log(logger, "Wrote " + machineId + " to " + machineIdPath); } addMounts(context, command); logger.info("Creating new container with args: " + command); command.create(); } private static void assertEqualIpAddresses(HostName hostName, Optional<? 
extends InetAddress> resolvedAddress, Set<String> nrAddresses, IPVersion ipVersion) { Optional<InetAddress> nrAddress = nrAddresses.stream() .map(InetAddresses::forString) .filter(ipVersion::match) .findFirst(); if (resolvedAddress.equals(nrAddress)) return; throw new ConvergenceException(String.format( "IP address (%s) resolved from %s does not match IP address (%s) in node-repo", resolvedAddress.map(InetAddresses::toAddrString).orElse("[none]"), hostName, nrAddress.map(InetAddresses::toAddrString).orElse("[none]"))); } void addEtcHosts(ContainerData containerData, String hostname, Optional<? extends InetAddress> ipV4Local, Optional<? extends InetAddress> ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n"); ipV6Local.ifPresent(ipv6 -> etcHosts.append(ipv6.getHostAddress()).append('\t').append(hostname).append('\n')); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress()).append('\t').append(hostname).append('\n')); containerData.addFile(fileSystem.getPath("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); containerEngine.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); containerEngine.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); containerEngine.deleteContainer(context.containerName()); } @Override public void updateContainer(NodeAgentContext context, ContainerResources containerResources) { containerEngine.updateContainer(context.containerName(), containerResources); } @Override public Optional<Container> getContainer(NodeAgentContext context) { return 
containerEngine.getContainer(context.containerName()); } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage, RegistryCredentials registryCredentials) { return containerEngine.pullImageAsyncIfNeeded(dockerImage, registryCredentials); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command) { int containerPid = containerEngine.getContainer(context.containerName()) .filter(container -> container.state.isRunning()) .orElseThrow(() -> new RuntimeException( "Found no running container named " + context.containerName().asString())) .pid; return terminal.newCommandLine(context) .add("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--") .add(command) .executeSilently(); } @Override public void resumeNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "resume"); } @Override public void suspendNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "suspend"); } @Override public void restartVespa(NodeAgentContext context) { executeNodeCtlInContainer(context, "restart-vespa"); } @Override public void startServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "start"); } @Override public void stopServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "stop"); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] {context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; 
ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return containerEngine.getContainerStats(context.containerName()); } private void addMounts(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { var volumes = new VolumeHelper(context, command); volumes.addPrivateVolumes( ETC_MACHINE_ID, "/etc/vespa/flags", "/etc/yamas-agent", "/opt/splunkforwarder/var/log", "/var/log", "/var/log/journal", "/var/spool/postfix/maildrop", "logs/vespa", "logs/ysar", "tmp", "var/crash", "var/container-data", "var/db/vespa", "var/jdisc_container", "var/vespa", "var/yca", "var/zookeeper"); if (context.nodeType() == NodeType.proxy) { volumes.addPrivateVolumes("logs/nginx", "var/vespa-hosted/routing"); } else if (context.nodeType() == NodeType.tenant) volumes.addPrivateVolumes("/var/lib/sia"); if (isInfrastructureHost(context.nodeType())) volumes.addSharedVolumeMap("/var/lib/sia", "/var/lib/sia"); boolean isMain = context.zone().getSystemName() == SystemName.cd || context.zone().getSystemName() == SystemName.main; if (isMain && context.nodeType() == NodeType.tenant) volumes.addSharedVolumeMap("/var/zpe", "var/zpe"); } @Override public boolean noManagedContainersRunning() { return containerEngine.noManagedContainersRunning(MANAGER_NAME); } @Override @Override public boolean deleteUnusedContainerImages(List<DockerImage> excludes, Duration minImageAgeToDelete) { return containerEngine.deleteUnusedDockerImages(excludes, minImageAgeToDelete); } /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.config || nodeType == NodeType.proxy || nodeType == 
NodeType.controller; } private static class VolumeHelper { private final NodeAgentContext context; private final ContainerEngine.CreateContainerCommand command; public VolumeHelper(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { this.context = context; this.command = command; } /** * Resolve each path to an absolute relative the container's vespa home directory. * Mounts the resulting path, under the container's storage directory as path in the container. */ public void addPrivateVolumes(String... pathsInNode) { Stream.of(pathsInNode).forEach(pathString -> { Path absolutePathInNode = resolveNodePath(pathString); Path pathOnHost = context.pathOnHostFromPathInNode(absolutePathInNode); command.withVolume(pathOnHost, absolutePathInNode); }); } /** * Mounts pathOnHost on the host as pathInNode in the container. Use for paths that * might be shared with other containers. */ public void addSharedVolumeMap(String pathOnHost, String pathInNode) { command.withSharedVolume(resolveNodePath(pathOnHost), resolveNodePath(pathInNode)); } private Path resolveNodePath(String pathString) { Path path = context.fileSystem().getPath(pathString); return path.isAbsolute() ? path : context.pathInNodeUnderVespaHome(path); } } }
class ContainerOperationsImpl implements ContainerOperations { private static final Logger logger = Logger.getLogger(ContainerOperationsImpl.class.getName()); static final String MANAGER_NAME = "node-admin"; private static final InetAddress IPV6_NPT_PREFIX = InetAddresses.forString("fd00::"); private static final InetAddress IPV4_NPT_PREFIX = InetAddresses.forString("172.17.0.0"); private static final String ETC_MACHINE_ID = "/etc/machine-id"; private static final Random random = new Random(System.nanoTime()); private final ContainerEngine containerEngine; private final Terminal terminal; private final IPAddresses ipAddresses; private final FileSystem fileSystem; public ContainerOperationsImpl(ContainerEngine containerEngine, Terminal terminal, IPAddresses ipAddresses, FileSystem fileSystem) { this.containerEngine = containerEngine; this.terminal = terminal; this.ipAddresses = ipAddresses; this.fileSystem = fileSystem; } @Override public void createContainer(NodeAgentContext context, ContainerData containerData, ContainerResources containerResources) { context.log(logger, "Creating container"); ContainerEngine.CreateContainerCommand command = containerEngine.createContainerCommand( context.node().wantedDockerImage().get(), context.containerName()) .withHostName(context.node().hostname()) .withResources(containerResources) .withManagedBy(MANAGER_NAME) .withDnsOption("inet6") .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 409_600, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN") .withAddCapability("SYS_NICE"); if (context.nodeType() != NodeType.proxy && context.nodeType() != NodeType.controller) command.withSecurityOpt("no-new-privileges"); if (context.node().membership().map(m -> m.type().isContent()).orElse(false)) command.withSecurityOpt("seccomp=unconfined"); DockerNetworking networking = context.dockerNetworking(); command.withNetworkMode(networking.getDockerNetworkMode()); if (networking == 
DockerNetworking.NPT) { Optional<? extends InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname()); Optional<? extends InetAddress> ipV6Local = ipAddresses.getIPv6Address(context.node().hostname()); assertEqualIpAddresses(context.hostname(), ipV4Local, context.node().ipAddresses(), IPVersion.IPv4); assertEqualIpAddresses(context.hostname(), ipV6Local, context.node().ipAddresses(), IPVersion.IPv6); if (ipV4Local.isEmpty() && ipV6Local.isEmpty()) { throw new ConvergenceException("Container " + context.node().hostname() + " with " + networking + " networking must have at least 1 IP address, but found none"); } ipV6Local = ipV6Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV6_NPT_PREFIX, 8)); ipV6Local.ifPresent(command::withIpAddress); ipV4Local = ipV4Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV4_NPT_PREFIX, 2)); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local); } else if (networking == DockerNetworking.LOCAL) { var ipv4Address = ipAddresses.getIPv4Address(context.node().hostname()) .orElseThrow(() -> new IllegalArgumentException("No IPv4 address could be resolved from '" + context.hostname()+ "'")); command.withIpAddress(ipv4Address); } UnixPath machineIdPath = new UnixPath(context.pathOnHostFromPathInNode(ETC_MACHINE_ID)); if (!machineIdPath.exists()) { String machineId = String.format("%16x%16x\n", random.nextLong(), random.nextLong()); machineIdPath.createParents().writeUtf8File(machineId); context.log(logger, "Wrote " + machineId + " to " + machineIdPath); } addMounts(context, command); logger.info("Creating new container with args: " + command); command.create(); } private static void assertEqualIpAddresses(HostName hostName, Optional<? 
extends InetAddress> resolvedAddress, Set<String> nrAddresses, IPVersion ipVersion) { Optional<InetAddress> nrAddress = nrAddresses.stream() .map(InetAddresses::forString) .filter(ipVersion::match) .findFirst(); if (resolvedAddress.equals(nrAddress)) return; throw new ConvergenceException(String.format( "IP address (%s) resolved from %s does not match IP address (%s) in node-repo", resolvedAddress.map(InetAddresses::toAddrString).orElse("[none]"), hostName, nrAddress.map(InetAddresses::toAddrString).orElse("[none]"))); } void addEtcHosts(ContainerData containerData, String hostname, Optional<? extends InetAddress> ipV4Local, Optional<? extends InetAddress> ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n"); ipV6Local.ifPresent(ipv6 -> etcHosts.append(ipv6.getHostAddress()).append('\t').append(hostname).append('\n')); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress()).append('\t').append(hostname).append('\n')); containerData.addFile(fileSystem.getPath("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); containerEngine.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); containerEngine.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); containerEngine.deleteContainer(context.containerName()); } @Override public void updateContainer(NodeAgentContext context, ContainerResources containerResources) { containerEngine.updateContainer(context.containerName(), containerResources); } @Override public Optional<Container> getContainer(NodeAgentContext context) { return 
containerEngine.getContainer(context.containerName()); } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage, RegistryCredentials registryCredentials) { return containerEngine.pullImageAsyncIfNeeded(dockerImage, registryCredentials); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command) { int containerPid = containerEngine.getContainer(context.containerName()) .filter(container -> container.state.isRunning()) .orElseThrow(() -> new RuntimeException( "Found no running container named " + context.containerName().asString())) .pid; return terminal.newCommandLine(context) .add("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--") .add(command) .executeSilently(); } @Override public void resumeNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "resume"); } @Override public void suspendNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "suspend"); } @Override public void restartVespa(NodeAgentContext context) { executeNodeCtlInContainer(context, "restart-vespa"); } @Override public void startServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "start"); } @Override public void stopServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "stop"); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] {context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; 
ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return containerEngine.getContainerStats(context.containerName()); } private void addMounts(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { var volumes = new VolumeHelper(context, command); volumes.addPrivateVolumes( ETC_MACHINE_ID, "/etc/vespa/flags", "/etc/yamas-agent", "/opt/splunkforwarder/var/log", "/var/log", "/var/log/journal", "/var/spool/postfix/maildrop", "logs/vespa", "logs/ysar", "tmp", "var/crash", "var/container-data", "var/db/vespa", "var/jdisc_container", "var/vespa", "var/yca", "var/zookeeper"); if (context.nodeType() == NodeType.proxy) { volumes.addPrivateVolumes("logs/nginx", "var/vespa-hosted/routing"); } else if (context.nodeType() == NodeType.tenant) volumes.addPrivateVolumes("/var/lib/sia"); if (isInfrastructureHost(context.nodeType())) volumes.addSharedVolumeMap("/var/lib/sia", "/var/lib/sia"); boolean isMain = context.zone().getSystemName() == SystemName.cd || context.zone().getSystemName() == SystemName.main; if (isMain && context.nodeType() == NodeType.tenant) volumes.addSharedVolumeMap("/var/zpe", "var/zpe"); } @Override public boolean noManagedContainersRunning() { return containerEngine.noManagedContainersRunning(MANAGER_NAME); } @Override @Override public boolean deleteUnusedContainerImages(List<DockerImage> excludes, Duration minImageAgeToDelete) { return containerEngine.deleteUnusedDockerImages(excludes, minImageAgeToDelete); } /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.config || nodeType == NodeType.proxy || nodeType == 
NodeType.controller; } private static class VolumeHelper { private final NodeAgentContext context; private final ContainerEngine.CreateContainerCommand command; public VolumeHelper(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { this.context = context; this.command = command; } /** * Resolve each path to an absolute relative the container's vespa home directory. * Mounts the resulting path, under the container's storage directory as path in the container. */ public void addPrivateVolumes(String... pathsInNode) { Stream.of(pathsInNode).forEach(pathString -> { Path absolutePathInNode = resolveNodePath(pathString); Path pathOnHost = context.pathOnHostFromPathInNode(absolutePathInNode); command.withVolume(pathOnHost, absolutePathInNode); }); } /** * Mounts pathOnHost on the host as pathInNode in the container. Use for paths that * might be shared with other containers. */ public void addSharedVolumeMap(String pathOnHost, String pathInNode) { command.withSharedVolume(resolveNodePath(pathOnHost), resolveNodePath(pathInNode)); } private Path resolveNodePath(String pathString) { Path path = context.fileSystem().getPath(pathString); return path.isAbsolute() ? path : context.pathInNodeUnderVespaHome(path); } } }
See comment above
/**
 * Recomputes this group's aggregate metrics from its nodes: total active documents,
 * whether any node blocks writes, and whether documents are evenly spread across
 * the working nodes (within MAX_UNBALANCE of the total).
 */
void aggregateNodeValues() {
    long activeDocs = nodes.stream()
                           .filter(node -> node.isWorking() == Boolean.TRUE)
                           .mapToLong(Node::getActiveDocuments)
                           .sum();
    activeDocuments.set(activeDocs);
    isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));

    int workers = workingNodes();
    if (workers == 0) {
        // No working nodes: trivially balanced.
        isContentWellBalanced.set(true);
        return;
    }

    long mean = activeDocs / workers;
    long totalSkew = nodes.stream()
                          .filter(node -> node.isWorking() == Boolean.TRUE)
                          .mapToLong(node -> Math.abs(node.getActiveDocuments() - mean))
                          .sum();
    isContentWellBalanced.set(totalSkew <= activeDocs * MAX_UNBALANCE);
}
long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
/**
 * Recomputes this group's aggregate metrics from its nodes: total active documents,
 * whether any node blocks writes, and whether documents are evenly spread across
 * the working nodes (within MAX_UNBALANCE of the total).
 */
void aggregateNodeValues() {
    long activeDocs = nodes.stream()
                           .filter(node -> node.isWorking() == Boolean.TRUE)
                           .mapToLong(Node::getActiveDocuments)
                           .sum();
    activeDocuments.set(activeDocs);
    isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));

    int workers = workingNodes();
    if (workers == 0) {
        // No working nodes: trivially balanced.
        isContentWellBalanced.set(true);
        return;
    }

    long mean = activeDocs / workers;
    long totalSkew = nodes.stream()
                          .filter(node -> node.isWorking() == Boolean.TRUE)
                          .mapToLong(node -> Math.abs(node.getActiveDocuments() - mean))
                          .sum();
    isContentWellBalanced.set(totalSkew <= activeDocs * MAX_UNBALANCE);
}
/**
 * A group of nodes within a search cluster. Aggregate state (coverage, document counts,
 * write blocking) is held in atomics because it is updated by a monitoring thread and
 * read concurrently by query threads.
 */
class Group {

    private final int id;
    private final ImmutableList<Node> nodes;
    private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
    private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
    private final AtomicLong activeDocuments = new AtomicLong(0);
    private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
    private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
    // Maximum tolerated total deviation from the per-node mean, as a fraction of all active documents.
    private final static double MAX_UNBALANCE = 0.10;

    public Group(int id, List<Node> nodes) {
        this.id = id;
        this.nodes = ImmutableList.copyOf(nodes);

        // Assign each node its position within the group.
        int idx = 0;
        for(var node: nodes) {
            node.setPathIndex(idx);
            idx++;
        }
    }

    /** Returns the unique identity of this group */
    public int id() { return id; }

    /** Returns the nodes in this group as an immutable list */
    public ImmutableList<Node> nodes() { return nodes; }

    /**
     * Returns whether this group has sufficient active documents
     * (compared to other groups) that is should receive traffic
     */
    public boolean hasSufficientCoverage() {
        return hasSufficientCoverage.get();
    }

    void setHasSufficientCoverage(boolean sufficientCoverage) {
        // lazySet (weaker ordering) is sufficient: readers tolerate briefly stale coverage state.
        hasSufficientCoverage.lazySet(sufficientCoverage);
    }

    /** Returns the number of nodes currently known to be working (isWorking() == Boolean.TRUE; other values count as not working). */
    public int workingNodes() {
        return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
    }

    /** Returns the active documents on this group. If unknown, 0 is returned. */
    long getActiveDocuments() {
        return activeDocuments.get();
    }

    /** Returns whether any node in this group is currently blocking write operations */
    public boolean isBlockingWrites() { return isBlockingWrites.get(); }

    /** Returns whether the active documents are evenly spread across this group's working nodes. */
    public boolean isContentWellBalanced() {
        return isContentWellBalanced.get();
    }

    /** Records the new full-coverage state and returns whether it differs from the previous state. */
    public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
        boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
        return previousState != hasFullCoverageNow;
    }

    @Override
    public String toString() { return "search group " + id; }

    @Override
    public int hashCode() { return id; }

    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if (!(other instanceof Group)) return false;
        return ((Group) other).id == this.id;
    }

}
/**
 * A group of nodes within a search cluster. Aggregate state (coverage, document counts,
 * write blocking) is held in atomics because it is updated by a monitoring thread and
 * read concurrently by query threads.
 */
class Group {

    private final int id;
    private final ImmutableList<Node> nodes;
    private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true);
    private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true);
    private final AtomicLong activeDocuments = new AtomicLong(0);
    private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false);
    private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true);
    // Maximum tolerated total deviation from the per-node mean, as a fraction of all active documents.
    private final static double MAX_UNBALANCE = 0.10;

    public Group(int id, List<Node> nodes) {
        this.id = id;
        this.nodes = ImmutableList.copyOf(nodes);

        // Assign each node its position within the group.
        int idx = 0;
        for(var node: nodes) {
            node.setPathIndex(idx);
            idx++;
        }
    }

    /** Returns the unique identity of this group */
    public int id() { return id; }

    /** Returns the nodes in this group as an immutable list */
    public ImmutableList<Node> nodes() { return nodes; }

    /**
     * Returns whether this group has sufficient active documents
     * (compared to other groups) that is should receive traffic
     */
    public boolean hasSufficientCoverage() {
        return hasSufficientCoverage.get();
    }

    void setHasSufficientCoverage(boolean sufficientCoverage) {
        // lazySet (weaker ordering) is sufficient: readers tolerate briefly stale coverage state.
        hasSufficientCoverage.lazySet(sufficientCoverage);
    }

    /** Returns the number of nodes currently known to be working (isWorking() == Boolean.TRUE; other values count as not working). */
    public int workingNodes() {
        return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
    }

    /** Returns the active documents on this group. If unknown, 0 is returned. */
    long getActiveDocuments() {
        return activeDocuments.get();
    }

    /** Returns whether any node in this group is currently blocking write operations */
    public boolean isBlockingWrites() { return isBlockingWrites.get(); }

    /** Returns whether the active documents are evenly spread across this group's working nodes. */
    public boolean isContentWellBalanced() {
        return isContentWellBalanced.get();
    }

    /** Records the new full-coverage state and returns whether it differs from the previous state. */
    public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) {
        boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow);
        return previousState != hasFullCoverageNow;
    }

    @Override
    public String toString() { return "search group " + id; }

    @Override
    public int hashCode() { return id; }

    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if (!(other instanceof Group)) return false;
        return ((Group) other).id == this.id;
    }

}
```suggestion return (int) nodes.stream().filter(node -> node.isWorking()).count(); ```
/**
 * Returns the number of nodes in this group that are known to be working.
 * Nodes whose working state is unknown (null) or false are not counted.
 * NOTE(review): isWorking() appears to return a nullable Boolean — TODO confirm.
 */
public int workingNodes() {
    // Boolean.TRUE.equals(..) is null-safe and compares by value, unlike the
    // original reference comparison '== Boolean.TRUE' which relies on the
    // returned Boolean being the canonical cached instance.
    return (int) nodes.stream().filter(node -> Boolean.TRUE.equals(node.isWorking())).count();
}
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
// Counts the nodes in this group whose working state is Boolean.TRUE.
// NOTE(review): isWorking() appears to return a nullable Boolean (null = unknown);
// the '== Boolean.TRUE' reference comparison excludes both null and FALSE — TODO
// confirm a value comparison (Boolean.TRUE.equals(..)) was not intended.
public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
```suggestion long activeDocs = nodes.stream().filter(node -> node.isWorking()).mapToLong(Node::getActiveDocuments).sum(); ```
/**
 * Folds per-node statistics into the group-level atomics: the total number of
 * active documents, whether any node blocks writes, and whether documents are
 * evenly spread across the working nodes.
 * NOTE(review): isWorking() appears to return a nullable Boolean — TODO confirm.
 */
void aggregateNodeValues() {
    // Only nodes known to be working contribute; Boolean.TRUE.equals(..) is
    // null-safe and compares by value, unlike the original '== Boolean.TRUE'
    // reference comparison.
    long activeDocs = nodes.stream()
                           .filter(node -> Boolean.TRUE.equals(node.isWorking()))
                           .mapToLong(Node::getActiveDocuments)
                           .sum();
    activeDocuments.set(activeDocs);
    isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites));
    int numWorkingNodes = workingNodes();
    if (numWorkingNodes > 0) {
        long average = activeDocs / numWorkingNodes;
        // Sum of absolute deviations from the per-node average; the group is
        // well balanced when this stays within MAX_UNBALANCE of the total.
        long deviation = nodes.stream()
                              .filter(node -> Boolean.TRUE.equals(node.isWorking()))
                              .mapToLong(node -> Math.abs(node.getActiveDocuments() - average))
                              .sum();
        isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE));
    } else {
        // No working nodes: there is nothing to balance.
        isContentWellBalanced.set(true);
    }
}
long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
// Folds per-node statistics into the group-level atomics: total active documents,
// whether any node blocks writes, and whether documents are evenly spread across
// working nodes (sum of |docs - average| must stay within MAX_UNBALANCE of the total).
// Only nodes whose isWorking() is Boolean.TRUE are included; null (unknown) and
// FALSE are excluded from both the document sum and the deviation sum.
void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
```suggestion long deviation = nodes.stream().filter(node -> node.isWorking()).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); ```
// Folds per-node statistics into the group-level atomics: total active documents,
// whether any node blocks writes, and whether documents are evenly spread across
// working nodes (sum of |docs - average| must stay within MAX_UNBALANCE of the total).
// Only nodes whose isWorking() is Boolean.TRUE are included; null (unknown) and
// FALSE are excluded from both the document sum and the deviation sum.
void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } }
long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum();
// Folds per-node statistics into the group-level atomics: total active documents,
// whether any node blocks writes, and whether documents are evenly spread across
// working nodes (sum of |docs - average| must stay within MAX_UNBALANCE of the total).
// Only nodes whose isWorking() is Boolean.TRUE are included; null (unknown) and
// FALSE are excluded from both the document sum and the deviation sum.
void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
What happens if 'null' is returned? Won't that give a NullPointerException?
// Counts the nodes in this group whose working state is Boolean.TRUE.
// NOTE(review): isWorking() appears to return a nullable Boolean (null = unknown);
// the '== Boolean.TRUE' reference comparison excludes both null and FALSE — TODO
// confirm a value comparison (Boolean.TRUE.equals(..)) was not intended.
public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); }
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
// Counts the nodes in this group whose working state is Boolean.TRUE.
// NOTE(review): isWorking() appears to return a nullable Boolean (null = unknown);
// the '== Boolean.TRUE' reference comparison excludes both null and FALSE — TODO
// confirm a value comparison (Boolean.TRUE.equals(..)) was not intended.
public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
See comment above
// Folds per-node statistics into the group-level atomics: total active documents,
// whether any node blocks writes, and whether documents are evenly spread across
// working nodes (sum of |docs - average| must stay within MAX_UNBALANCE of the total).
// Only nodes whose isWorking() is Boolean.TRUE are included; null (unknown) and
// FALSE are excluded from both the document sum and the deviation sum.
void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } }
long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum();
// Folds per-node statistics into the group-level atomics: total active documents,
// whether any node blocks writes, and whether documents are evenly spread across
// working nodes (sum of |docs - average| must stay within MAX_UNBALANCE of the total).
// Only nodes whose isWorking() is Boolean.TRUE are included; null (unknown) and
// FALSE are excluded from both the document sum and the deviation sum.
void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
// A search group: an id plus an immutable list of nodes, with atomically updated
// coverage/health flags (sufficient coverage, full coverage, write blocking, balance).
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that it should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
If it is already unmodifiable, both ways are cheap; otherwise List.copyOf is not.
/**
 * Returns an immutable snapshot of the order-by indexes. List.copyOf is a no-op
 * when the backing list is already an unmodifiable List.of/copyOf instance.
 */
public List<Integer> getOrderByIndexes() {
    List<Integer> snapshot = List.copyOf(this.orderByIdx);
    return snapshot;
}
return List.copyOf(orderByIdx);
// Returns an immutable snapshot of the order-by indexes; List.copyOf is a no-op
// when the backing list is already an unmodifiable List.of/copyOf instance.
public List<Integer> getOrderByIndexes() { return List.copyOf(orderByIdx); }
class Group extends Identifiable { public static final int classId = registerClass(0x4000 + 90, Group.class); private static final ObjectPredicate REF_LOCATOR = new RefLocator(); private List<Integer> orderByIdx = List.of(); private List<ExpressionNode> orderByExp = List.of(); private List<AggregationResult> aggregationResults = List.of(); private List<Group> children = List.of(); private ResultNode id = null; private double rank; private int tag = -1; private SortType sortType = SortType.UNSORTED; private static <T> List<T> add(List<T> oldList, T obj) { if (oldList.isEmpty()) { return List.of(obj); } if (oldList.size() == 1) { return List.of(oldList.get(0), obj); } List<T> newList = (oldList instanceof ArrayList) ? oldList : new ArrayList<>(oldList); newList.add(obj); return newList; } private static <T> List<T> sort(List<T> list, Comparator<T> cmp) { if (list instanceof ArrayList) { list.sort(cmp); return list; } else { return list.stream().sorted(cmp).collect(Collectors.toList()); } } /** * This tells you if the children are ranked by the pure relevance or by a more complex expression. * That indicates if the rank score from the child can be used for ordering. * * @return true if it ranked by pure relevance. */ public boolean isRankedByRelevance() { return orderByIdx.isEmpty(); } /** * Merges the content of the given group <b>into</b> this. When this function returns, make sure to call * {@link * * @param firstLevel The first level to merge. * @param currentLevel The current level. * @param rhs The group to merge with. 
*/ public void merge(int firstLevel, int currentLevel, Group rhs) { if (rhs.rank > rank) { rank = rhs.rank; } if (currentLevel >= firstLevel) { for (int i = 0, len = aggregationResults.size(); i < len; ++i) { aggregationResults.get(i).merge(rhs.aggregationResults.get(i)); } } ArrayList<Group> merged = new ArrayList<>(); Iterator<Group> lhsChild = children.iterator(), rhsChild = rhs.children.iterator(); if (lhsChild.hasNext() && rhsChild.hasNext()) { Group lhsGroup = lhsChild.next(); Group rhsGroup = rhsChild.next(); while ((lhsGroup != null) && (rhsGroup != null)) { int cmp = lhsGroup.getId().compareTo(rhsGroup.getId()); if (cmp < 0) { merged.add(lhsGroup); lhsGroup = lhsChild.hasNext() ? lhsChild.next() : null; } else if (cmp > 0) { merged.add(rhsGroup); rhsGroup = rhsChild.hasNext() ? rhsChild.next() : null; } else { lhsGroup.merge(firstLevel, currentLevel + 1, rhsGroup); merged.add(lhsGroup); lhsGroup = lhsChild.hasNext() ? lhsChild.next() : null; rhsGroup = rhsChild.hasNext() ? rhsChild.next() : null; } } if (lhsGroup != null) { merged.add(lhsGroup); } if (rhsGroup != null) { merged.add(rhsGroup); } } while (lhsChild.hasNext()) { merged.add(lhsChild.next()); } while (rhsChild.hasNext()) { merged.add(rhsChild.next()); } children = merged; } private void executeOrderBy() { for (ExpressionNode node : orderByExp) { node.prepare(); node.execute(); } } /** * After merging, this method will prune all levels so that they do not exceed the configured maximum number of * groups per level. * * @param levels The specs of all grouping levels. * @param firstLevel The first level to merge. * @param currentLevel The current level. 
*/ public void postMerge(List<GroupingLevel> levels, int firstLevel, int currentLevel) { if (currentLevel >= firstLevel) { for (AggregationResult result : aggregationResults) { result.postMerge(); } for (ExpressionNode result : orderByExp) { result.execute(); } } if (currentLevel < levels.size()) { int maxGroups = (int)levels.get(currentLevel).getMaxGroups(); for (Group group : children) { group.executeOrderBy(); } if (maxGroups >= 0 && children.size() > maxGroups) { sortChildrenByRank(); children = children.subList(0, maxGroups); sortChildrenById(); } for (Group group : children) { group.postMerge(levels, firstLevel, currentLevel + 1); } } } /** Sorts the children by their id, if they are not sorted already. */ public void sortChildrenById() { if (sortType == SortType.BYID) { return; } children = sort(children, Group::compareId); sortType = SortType.BYID; } /** Sorts the children by their rank, if they are not sorted already. */ public void sortChildrenByRank() { if (sortType == SortType.BYRANK) { return; } children = sort(children, Group::compareRank); sortType = SortType.BYRANK; } /** * Returns the label to use for this group. See comment on {@link * on the rationale of this being a {@link ResultNode}. */ public ResultNode getId() { return id; } /** * Sets the label to use for this group. This is a {@link ResultNode} so that a group can be labeled with * whatever value the classifier expression returns. * * @param id the label to set * @return this, to allow chaining */ public Group setId(ResultNode id) { this.id = id; return this; } /** * Sets the relevancy to use for this group. * * @param rank The rank to set. * @return This, to allow chaining. */ public Group setRank(double rank) { this.rank = rank; return this; } /** Return the rank score of this group. */ public double getRank() { return rank; } /** * Adds a child group to this. * * @param child The group to add. * @return This, to allow chaining. 
*/ public Group addChild(Group child) { if (child == null) { throw new IllegalArgumentException("Child can not be null."); } children = add(children, child); return this; } /** Returns the list of child groups to this. */ public List<Group> getChildren() { return List.copyOf(children); } /** * Returns the tag of this group. This value is set per-level in the grouping request, and then becomes assigned * to each group of that level in the grouping result as they are copied from the prototype. */ public int getTag() { return tag; } /** * Assigns a tag to this group. * * @param tag the numerical tag to set * @return this, to allow chaining */ public Group setTag(int tag) { this.tag = tag; return this; } /** * Returns this group's aggregation results. * * @return the aggregation results */ public List<AggregationResult> getAggregationResults() { return List.copyOf(aggregationResults); } /** * Adds an aggregation result to this group. * * @param result the result to add * @return this, to allow chaining */ public Group addAggregationResult(AggregationResult result) { aggregationResults = add(aggregationResults, result); return this; } /** * Adds an order-by expression to this group. If the expression is an AggregationResult, it will be added to the * list of this group's AggregationResults, and a reference to that expression is added instead. If the * AggregationResult is already present, a reference to THAT result is created instead. * * @param exp the result to add * @param asc true to sort ascending, false to sort descending * @return this, to allow chaining */ public Group addOrderBy(ExpressionNode exp, boolean asc) { if (exp instanceof AggregationResult) { exp = new AggregationRefNode((AggregationResult)exp); } RefResolver refResolver = new RefResolver(aggregationResults); exp.select(REF_LOCATOR, refResolver); aggregationResults = refResolver.results; orderByExp = add(orderByExp, exp); orderByIdx = add(orderByIdx, (asc ? 
1 : -1) * orderByExp.size()); return this; } public List<ExpressionNode> getOrderByExpressions() { return List.copyOf(orderByExp); } private int compareId(Group rhs) { return getId().compareTo(rhs.getId()); } private int compareRank(Group rhs) { long diff = 0; for (int i = 0, m = orderByIdx.size(); (diff == 0) && (i < m); i++) { int rawIndex = orderByIdx.get(i); int index = ((rawIndex < 0) ? -rawIndex : rawIndex) - 1; diff = orderByExp.get(index).getResult().compareTo(rhs.orderByExp.get(index).getResult()); diff = diff * rawIndex; } if (diff < 0) { return -1; } if (diff > 0) { return 1; } return -Double.compare(rank, rhs.rank); } @Override protected int onGetClassId() { return classId; } @Override protected void onSerialize(Serializer buf) { super.onSerialize(buf); serializeOptional(buf, id); buf.putDouble(null, rank); int sz = orderByIdx.size(); buf.putInt(null, sz); for (Integer index : orderByIdx) { buf.putInt(null, index); } int numResults = aggregationResults.size(); buf.putInt(null, numResults); for (AggregationResult a : aggregationResults) { serializeOptional(buf, a); } int numExpressionResults = orderByExp.size(); buf.putInt(null, numExpressionResults); for (ExpressionNode e : orderByExp) { serializeOptional(buf, e); } int numGroups = children.size(); buf.putInt(null, numGroups); for (Group g : children) { g.serializeWithId(buf); } buf.putInt(null, tag); } @Override protected void onDeserialize(Deserializer buf) { super.onDeserialize(buf); id = (ResultNode)deserializeOptional(buf); rank = buf.getDouble(null); orderByIdx = List.of(); int orderByCount = buf.getInt(null); if (orderByCount > 0) { Integer [] idxes = new Integer[orderByCount]; for (int i = 0; i < orderByCount; i++) { idxes[i] = buf.getInt(null); } orderByIdx = List.of(idxes); } int numResults = buf.getInt(null); if (numResults > 0) { AggregationResult [] results = new AggregationResult[numResults]; for (int i = 0; i < numResults; i++) { results[i] = (AggregationResult) deserializeOptional(buf); 
} aggregationResults = List.of(results); } else { aggregationResults = List.of(); } int numExpressionResults = buf.getInt(null); if (numExpressionResults > 0) { RefResolver resolver = new RefResolver(aggregationResults); ExpressionNode[] orderBy = new ExpressionNode[numExpressionResults]; for (int i = 0; i < numExpressionResults; i++) { ExpressionNode exp = (ExpressionNode) deserializeOptional(buf); exp.select(REF_LOCATOR, resolver); orderBy[i] = exp; } aggregationResults = resolver.results; orderByExp = List.of(orderBy); } else { orderByExp = List.of(); } int numGroups = buf.getInt(null); if (numGroups > 0) { Group [] groups = new Group[numGroups]; for (int i = 0; i < numGroups; i++) { Group g = new Group(); g.deserializeWithId(buf); groups[i] = g; } children = List.of(groups); } else { children = List.of(); } tag = buf.getInt(null); } @Override public int hashCode() { return super.hashCode() + aggregationResults.hashCode() + children.hashCode(); } @Override public boolean equals(Object obj) { if (obj == this) return true; if (!super.equals(obj)) return false; Group rhs = (Group)obj; if (!equals(id, rhs.id)) return false; if (rank != rhs.rank) return false; if (!aggregationResults.equals(rhs.aggregationResults)) return false; if (!orderByIdx.equals(rhs.orderByIdx)) return false; if (!orderByExp.equals(rhs.orderByExp)) return false; if (!children.equals(rhs.children)) return false; return true; } @Override public Group clone() { Group obj = (Group)super.clone(); if (id != null) { obj.id = (ResultNode)id.clone(); } if ( ! aggregationResults.isEmpty() ) { AggregationResult [] results = new AggregationResult[aggregationResults.size()]; int i = 0; for (AggregationResult result : aggregationResults) { results[i++] = result.clone(); } obj.aggregationResults = List.of(results); } obj.orderByIdx = List.copyOf(orderByIdx); if ( ! 
orderByExp.isEmpty()) { obj.orderByExp = new ArrayList<>(); RefResolver resolver = new RefResolver(obj.aggregationResults); ExpressionNode[] orderBy = new ExpressionNode[orderByExp.size()]; int i = 0; for (ExpressionNode exp : orderByExp) { exp = exp.clone(); exp.select(REF_LOCATOR, resolver); orderBy[i++] = exp; } obj.orderByExp = List.of(orderBy); obj.aggregationResults = resolver.results; } if ( ! children.isEmpty() ) { Group [] groups = new Group[children.size()]; int i = 0; for (Group child : children) { groups[i++] = child.clone(); } obj.children = List.of(groups); } return obj; } @Override public void visitMembers(ObjectVisitor visitor) { super.visitMembers(visitor); visitor.visit("id", id); visitor.visit("rank", rank); visitor.visit("aggregationresults", aggregationResults); visitor.visit("orderby-idx", orderByIdx); visitor.visit("orderby-exp", orderByExp); visitor.visit("children", children); visitor.visit("tag", tag); } @Override public void selectMembers(ObjectPredicate predicate, ObjectOperation operation) { for (AggregationResult result : aggregationResults) { result.select(predicate, operation); } for (ExpressionNode exp : orderByExp) { exp.select(predicate, operation); } } private enum SortType { UNSORTED, BYRANK, BYID } private static class RefLocator implements ObjectPredicate { @Override public boolean check(Object obj) { return obj instanceof AggregationRefNode; } } private static class RefResolver implements ObjectOperation { List<AggregationResult> results; RefResolver(List<AggregationResult> initial) { this.results = initial; } @Override public void execute(Object obj) { AggregationRefNode ref = (AggregationRefNode)obj; int idx = ref.getIndex(); if (idx < 0) { AggregationResult res = ref.getExpression(); idx = indexOf(res); if (idx < 0) { idx = results.size(); results = add(results, res); } ref.setIndex(idx); } else { ref.setExpression(results.get(idx)); } } int indexOf(AggregationResult lhs) { int prevTag = lhs.getTag(); for (int i = 0, len = 
results.size(); i < len; ++i) { AggregationResult rhs = results.get(i); lhs.setTag(rhs.getTag()); if (lhs.equals(rhs)) { return i; } } lhs.setTag(prevTag); return -1; } } }
class Group extends Identifiable { public static final int classId = registerClass(0x4000 + 90, Group.class); private static final ObjectPredicate REF_LOCATOR = new RefLocator(); private List<Integer> orderByIdx = List.of(); private List<ExpressionNode> orderByExp = List.of(); private List<AggregationResult> aggregationResults = List.of(); private List<Group> children = List.of(); private ResultNode id = null; private double rank; private int tag = -1; private SortType sortType = SortType.UNSORTED; private static <T> List<T> add(List<T> oldList, T obj) { if (oldList.isEmpty()) { return List.of(obj); } if (oldList.size() == 1) { return List.of(oldList.get(0), obj); } List<T> newList = (oldList instanceof ArrayList) ? oldList : new ArrayList<>(oldList); newList.add(obj); return newList; } private static <T> List<T> sort(List<T> list, Comparator<T> cmp) { if (list instanceof ArrayList) { list.sort(cmp); return list; } else { if (list.size() < 2) return list; if (list.size() == 2) { return (cmp.compare(list.get(0), list.get(1)) > 0) ? List.of(list.get(1), list.get(0)) : list; } return list.stream().sorted(cmp).collect(Collectors.toList()); } } /** * This tells you if the children are ranked by the pure relevance or by a more complex expression. * That indicates if the rank score from the child can be used for ordering. * * @return true if it ranked by pure relevance. */ public boolean isRankedByRelevance() { return orderByIdx.isEmpty(); } /** * Merges the content of the given group <b>into</b> this. When this function returns, make sure to call * {@link * * @param firstLevel The first level to merge. * @param currentLevel The current level. * @param rhs The group to merge with. 
*/ public void merge(int firstLevel, int currentLevel, Group rhs) { if (rhs.rank > rank) { rank = rhs.rank; } if (currentLevel >= firstLevel) { for (int i = 0, len = aggregationResults.size(); i < len; ++i) { aggregationResults.get(i).merge(rhs.aggregationResults.get(i)); } } ArrayList<Group> merged = new ArrayList<>(); Iterator<Group> lhsChild = children.iterator(), rhsChild = rhs.children.iterator(); if (lhsChild.hasNext() && rhsChild.hasNext()) { Group lhsGroup = lhsChild.next(); Group rhsGroup = rhsChild.next(); while ((lhsGroup != null) && (rhsGroup != null)) { int cmp = lhsGroup.getId().compareTo(rhsGroup.getId()); if (cmp < 0) { merged.add(lhsGroup); lhsGroup = lhsChild.hasNext() ? lhsChild.next() : null; } else if (cmp > 0) { merged.add(rhsGroup); rhsGroup = rhsChild.hasNext() ? rhsChild.next() : null; } else { lhsGroup.merge(firstLevel, currentLevel + 1, rhsGroup); merged.add(lhsGroup); lhsGroup = lhsChild.hasNext() ? lhsChild.next() : null; rhsGroup = rhsChild.hasNext() ? rhsChild.next() : null; } } if (lhsGroup != null) { merged.add(lhsGroup); } if (rhsGroup != null) { merged.add(rhsGroup); } } while (lhsChild.hasNext()) { merged.add(lhsChild.next()); } while (rhsChild.hasNext()) { merged.add(rhsChild.next()); } children = merged; } private void executeOrderBy() { for (ExpressionNode node : orderByExp) { node.prepare(); node.execute(); } } /** * After merging, this method will prune all levels so that they do not exceed the configured maximum number of * groups per level. * * @param levels The specs of all grouping levels. * @param firstLevel The first level to merge. * @param currentLevel The current level. 
*/ public void postMerge(List<GroupingLevel> levels, int firstLevel, int currentLevel) { if (currentLevel >= firstLevel) { for (AggregationResult result : aggregationResults) { result.postMerge(); } for (ExpressionNode result : orderByExp) { result.execute(); } } if (currentLevel < levels.size()) { int maxGroups = (int)levels.get(currentLevel).getMaxGroups(); for (Group group : children) { group.executeOrderBy(); } if (maxGroups >= 0 && children.size() > maxGroups) { sortChildrenByRank(); children = children.subList(0, maxGroups); sortChildrenById(); } for (Group group : children) { group.postMerge(levels, firstLevel, currentLevel + 1); } } } /** Sorts the children by their id, if they are not sorted already. */ public void sortChildrenById() { if (sortType == SortType.BYID) { return; } children = sort(children, Group::compareId); sortType = SortType.BYID; } /** Sorts the children by their rank, if they are not sorted already. */ public void sortChildrenByRank() { if (sortType == SortType.BYRANK) { return; } children = sort(children, Group::compareRank); sortType = SortType.BYRANK; } /** * Returns the label to use for this group. See comment on {@link * on the rationale of this being a {@link ResultNode}. */ public ResultNode getId() { return id; } /** * Sets the label to use for this group. This is a {@link ResultNode} so that a group can be labeled with * whatever value the classifier expression returns. * * @param id the label to set * @return this, to allow chaining */ public Group setId(ResultNode id) { this.id = id; return this; } /** * Sets the relevancy to use for this group. * * @param rank The rank to set. * @return This, to allow chaining. */ public Group setRank(double rank) { this.rank = rank; return this; } /** Return the rank score of this group. */ public double getRank() { return rank; } /** * Adds a child group to this. * * @param child The group to add. * @return This, to allow chaining. 
*/ public Group addChild(Group child) { if (child == null) { throw new IllegalArgumentException("Child can not be null."); } children = add(children, child); return this; } /** Returns the list of child groups to this. */ public List<Group> getChildren() { return List.copyOf(children); } /** * Returns the tag of this group. This value is set per-level in the grouping request, and then becomes assigned * to each group of that level in the grouping result as they are copied from the prototype. */ public int getTag() { return tag; } /** * Assigns a tag to this group. * * @param tag the numerical tag to set * @return this, to allow chaining */ public Group setTag(int tag) { this.tag = tag; return this; } /** * Returns this group's aggregation results. * * @return the aggregation results */ public List<AggregationResult> getAggregationResults() { return List.copyOf(aggregationResults); } /** * Adds an aggregation result to this group. * * @param result the result to add * @return this, to allow chaining */ public Group addAggregationResult(AggregationResult result) { aggregationResults = add(aggregationResults, result); return this; } /** * Adds an order-by expression to this group. If the expression is an AggregationResult, it will be added to the * list of this group's AggregationResults, and a reference to that expression is added instead. If the * AggregationResult is already present, a reference to THAT result is created instead. * * @param exp the result to add * @param asc true to sort ascending, false to sort descending * @return this, to allow chaining */ public Group addOrderBy(ExpressionNode exp, boolean asc) { if (exp instanceof AggregationResult) { exp = new AggregationRefNode((AggregationResult)exp); } RefResolver refResolver = new RefResolver(aggregationResults); exp.select(REF_LOCATOR, refResolver); aggregationResults = refResolver.results; orderByExp = add(orderByExp, exp); orderByIdx = add(orderByIdx, (asc ? 
1 : -1) * orderByExp.size()); return this; } public List<ExpressionNode> getOrderByExpressions() { return List.copyOf(orderByExp); } private int compareId(Group rhs) { return getId().compareTo(rhs.getId()); } private int compareRank(Group rhs) { long diff = 0; for (int i = 0, m = orderByIdx.size(); (diff == 0) && (i < m); i++) { int rawIndex = orderByIdx.get(i); int index = ((rawIndex < 0) ? -rawIndex : rawIndex) - 1; diff = orderByExp.get(index).getResult().compareTo(rhs.orderByExp.get(index).getResult()); diff = diff * rawIndex; } if (diff < 0) { return -1; } if (diff > 0) { return 1; } return -Double.compare(rank, rhs.rank); } @Override protected int onGetClassId() { return classId; } @Override protected void onSerialize(Serializer buf) { super.onSerialize(buf); serializeOptional(buf, id); buf.putDouble(null, rank); int sz = orderByIdx.size(); buf.putInt(null, sz); for (Integer index : orderByIdx) { buf.putInt(null, index); } int numResults = aggregationResults.size(); buf.putInt(null, numResults); for (AggregationResult a : aggregationResults) { serializeOptional(buf, a); } int numExpressionResults = orderByExp.size(); buf.putInt(null, numExpressionResults); for (ExpressionNode e : orderByExp) { serializeOptional(buf, e); } int numGroups = children.size(); buf.putInt(null, numGroups); for (Group g : children) { g.serializeWithId(buf); } buf.putInt(null, tag); } @Override protected void onDeserialize(Deserializer buf) { super.onDeserialize(buf); id = (ResultNode)deserializeOptional(buf); rank = buf.getDouble(null); orderByIdx = List.of(); int orderByCount = buf.getInt(null); if (orderByCount > 0) { Integer [] idxes = new Integer[orderByCount]; for (int i = 0; i < orderByCount; i++) { idxes[i] = buf.getInt(null); } orderByIdx = List.of(idxes); } int numResults = buf.getInt(null); if (numResults > 0) { AggregationResult [] results = new AggregationResult[numResults]; for (int i = 0; i < numResults; i++) { results[i] = (AggregationResult) deserializeOptional(buf); 
} aggregationResults = List.of(results); } else { aggregationResults = List.of(); } int numExpressionResults = buf.getInt(null); if (numExpressionResults > 0) { RefResolver resolver = new RefResolver(aggregationResults); ExpressionNode[] orderBy = new ExpressionNode[numExpressionResults]; for (int i = 0; i < numExpressionResults; i++) { ExpressionNode exp = (ExpressionNode) deserializeOptional(buf); exp.select(REF_LOCATOR, resolver); orderBy[i] = exp; } aggregationResults = resolver.results; orderByExp = List.of(orderBy); } else { orderByExp = List.of(); } int numGroups = buf.getInt(null); if (numGroups > 0) { Group [] groups = new Group[numGroups]; for (int i = 0; i < numGroups; i++) { Group g = new Group(); g.deserializeWithId(buf); groups[i] = g; } children = List.of(groups); } else { children = List.of(); } tag = buf.getInt(null); } @Override public int hashCode() { return super.hashCode() + aggregationResults.hashCode() + children.hashCode(); } @Override public boolean equals(Object obj) { if (obj == this) return true; if (!super.equals(obj)) return false; Group rhs = (Group)obj; if (!equals(id, rhs.id)) return false; if (rank != rhs.rank) return false; if (!aggregationResults.equals(rhs.aggregationResults)) return false; if (!orderByIdx.equals(rhs.orderByIdx)) return false; if (!orderByExp.equals(rhs.orderByExp)) return false; if (!children.equals(rhs.children)) return false; return true; } @Override public Group clone() { Group obj = (Group)super.clone(); if (id != null) { obj.id = (ResultNode)id.clone(); } if ( ! aggregationResults.isEmpty() ) { AggregationResult [] results = new AggregationResult[aggregationResults.size()]; int i = 0; for (AggregationResult result : aggregationResults) { results[i++] = result.clone(); } obj.aggregationResults = List.of(results); } obj.orderByIdx = List.copyOf(orderByIdx); if ( ! 
orderByExp.isEmpty()) { obj.orderByExp = new ArrayList<>(); RefResolver resolver = new RefResolver(obj.aggregationResults); ExpressionNode[] orderBy = new ExpressionNode[orderByExp.size()]; int i = 0; for (ExpressionNode exp : orderByExp) { exp = exp.clone(); exp.select(REF_LOCATOR, resolver); orderBy[i++] = exp; } obj.orderByExp = List.of(orderBy); obj.aggregationResults = resolver.results; } if ( ! children.isEmpty() ) { Group [] groups = new Group[children.size()]; int i = 0; for (Group child : children) { groups[i++] = child.clone(); } obj.children = List.of(groups); } return obj; } @Override public void visitMembers(ObjectVisitor visitor) { super.visitMembers(visitor); visitor.visit("id", id); visitor.visit("rank", rank); visitor.visit("aggregationresults", aggregationResults); visitor.visit("orderby-idx", orderByIdx); visitor.visit("orderby-exp", orderByExp); visitor.visit("children", children); visitor.visit("tag", tag); } @Override public void selectMembers(ObjectPredicate predicate, ObjectOperation operation) { for (AggregationResult result : aggregationResults) { result.select(predicate, operation); } for (ExpressionNode exp : orderByExp) { exp.select(predicate, operation); } } private enum SortType { UNSORTED, BYRANK, BYID } private static class RefLocator implements ObjectPredicate { @Override public boolean check(Object obj) { return obj instanceof AggregationRefNode; } } private static class RefResolver implements ObjectOperation { List<AggregationResult> results; RefResolver(List<AggregationResult> initial) { this.results = initial; } @Override public void execute(Object obj) { AggregationRefNode ref = (AggregationRefNode)obj; int idx = ref.getIndex(); if (idx < 0) { AggregationResult res = ref.getExpression(); idx = indexOf(res); if (idx < 0) { idx = results.size(); results = add(results, res); } ref.setIndex(idx); } else { ref.setExpression(results.get(idx)); } } int indexOf(AggregationResult lhs) { int prevTag = lhs.getTag(); for (int i = 0, len = 
results.size(); i < len; ++i) { AggregationResult rhs = results.get(i); lhs.setTag(rhs.getTag()); if (lhs.equals(rhs)) { return i; } } lhs.setTag(prevTag); return -1; } } }
💥
public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); }
return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count();
public int workingNodes() { return (int) nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).count(); }
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that is should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } } /** Returns the active documents on this groip. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicBoolean hasFullCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); private final AtomicBoolean isBlockingWrites = new AtomicBoolean(false); private final AtomicBoolean isContentWellBalanced = new AtomicBoolean(true); private final static double MAX_UNBALANCE = 0.10; public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); int idx = 0; for(var node: nodes) { node.setPathIndex(idx); idx++; } } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that is should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateNodeValues() { long activeDocs = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(Node::getActiveDocuments).sum(); activeDocuments.set(activeDocs); isBlockingWrites.set(nodes.stream().anyMatch(Node::isBlockingWrites)); int numWorkingNodes = workingNodes(); if (numWorkingNodes > 0) { long average = activeDocs / numWorkingNodes; long deviation = nodes.stream().filter(node -> node.isWorking() == Boolean.TRUE).mapToLong(node -> Math.abs(node.getActiveDocuments() - average)).sum(); isContentWellBalanced.set(deviation <= (activeDocs * MAX_UNBALANCE)); } else { isContentWellBalanced.set(true); } } /** Returns the active documents on this group. If unknown, 0 is returned. 
*/ long getActiveDocuments() { return activeDocuments.get(); } /** Returns whether any node in this group is currently blocking write operations */ public boolean isBlockingWrites() { return isBlockingWrites.get(); } public boolean isContentWellBalanced() { return isContentWellBalanced.get(); } public boolean isFullCoverageStatusChanged(boolean hasFullCoverageNow) { boolean previousState = hasFullCoverage.getAndSet(hasFullCoverageNow); return previousState != hasFullCoverageNow; } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
This is just an optimization, right? I don't think it's necessary at this level, but consider adding a comment explaining why `getNode` is called twice.
/**
 * Acquires the lock covering the given node and returns the node re-read under that lock,
 * retrying when the node's owner (and therefore the correct lock) changes concurrently.
 *
 * @param node the node to lock; its hostname identifies it, its allocation selects the lock
 * @return the fresh node together with its held lock, or empty if the node no longer exists
 * @throws IllegalStateException if a stable read could not be obtained within the retry budget
 */
public Optional<NodeMutex> lockAndGet(Node node) {
    Node staleNode = node;

    final int maxRetries = 4;
    for (int i = 0; i < maxRetries; ++i) {
        Mutex lockToClose = lock(staleNode);
        try {
            // getNode is called twice on purpose: the first call looks the node up in the
            // state we last observed (a narrower read); only if the node has since moved
            // to another state do we fall back to an unfiltered lookup to find it anywhere.
            Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                freshNode = getNode(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    return Optional.empty(); // Node has been removed entirely
                }
            }

            if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                // Owner unchanged, so we hold the right lock: transfer its ownership
                // to the returned NodeMutex and prevent the finally-block from closing it.
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null;
                return Optional.of(nodeMutex);
            }

            // Owner changed under us, meaning we grabbed the wrong lock; retry with fresh data.
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }

    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}
Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
/**
 * Acquires the lock covering the given node and returns the node re-read under that lock,
 * retrying when the node's owner (and therefore the correct lock) changes concurrently.
 *
 * @param node the node to lock; its hostname identifies it, its allocation selects the lock
 * @return the fresh node together with its held lock, or empty if the node no longer exists
 * @throws IllegalStateException if a stable read could not be obtained within the retry budget
 */
public Optional<NodeMutex> lockAndGet(Node node) {
    Node staleNode = node;

    final int maxRetries = 4;
    for (int i = 0; i < maxRetries; ++i) {
        Mutex lockToClose = lock(staleNode);
        try {
            // getNode is called twice on purpose: the first call looks the node up in the
            // state we last observed (a narrower read); only if the node has since moved
            // to another state do we fall back to an unfiltered lookup to find it anywhere.
            Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                freshNode = getNode(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    // Node has been removed entirely
                    return Optional.empty();
                }
            }

            if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                // Owner unchanged, so we hold the right lock: transfer its ownership to the
                // returned NodeMutex; nulling lockToClose stops the finally-block closing it.
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null;
                return Optional.of(nodeMutex);
            }

            // Owner changed under us, meaning we grabbed the wrong lock; retry with fresh data.
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }

    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}
/**
 * The top-level inventory of all nodes in a zone. Nodes and their state transitions are
 * persisted through a {@link CuratorDatabaseClient}; reads go through the same client.
 * Mutating operations take either the application lock (for allocated nodes) or the
 * unallocated-nodes lock — see the lock* methods at the bottom of this class.
 */
class NodeRepository extends AbstractComponent {

    private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

    private final CuratorDatabaseClient db;
    private final Clock clock;
    private final Zone zone;
    private final NodeFlavors flavors;
    private final HostResourcesCalculator resourcesCalculator;
    private final NameResolver nameResolver;
    private final OsVersions osVersions;
    private final InfrastructureVersions infrastructureVersions;
    private final FirmwareChecks firmwareChecks;
    private final ContainerImages containerImages;
    private final JobControl jobControl;
    private final Applications applications;
    private final int spareCount;

    /**
     * Creates a node repository from a zookeeper provider.
     * This will use the system time to make time-sensitive decisions
     */
    @Inject
    public NodeRepository(NodeRepositoryConfig config,
                          NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Zone zone,
                          FlagSource flagSource) {
        // Spare hosts are kept only in production zones without dynamic provisioning
        this(flavors,
             provisionServiceProvider,
             curator,
             Clock.systemUTC(),
             zone,
             new DnsNameResolver(),
             DockerImage.fromString(config.containerImage())
                        .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())),
             flagSource,
             config.useCuratorClientCache(),
             zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0,
             config.nodeCacheSize());
    }

    /**
     * Creates a node repository from a zookeeper provider and a clock instance
     * which will be used for time-sensitive decisions.
     */
    public NodeRepository(NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Clock clock,
                          Zone zone,
                          NameResolver nameResolver,
                          DockerImage containerImage,
                          FlagSource flagSource,
                          boolean useCuratorClientCache,
                          int spareCount,
                          long nodeCacheSize) {
        this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
        this.zone = zone;
        this.clock = clock;
        this.flavors = flavors;
        this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator();
        this.nameResolver = nameResolver;
        this.osVersions = new OsVersions(this);
        this.infrastructureVersions = new InfrastructureVersions(db);
        this.firmwareChecks = new FirmwareChecks(db, clock);
        this.containerImages = new ContainerImages(db, containerImage);
        this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
        this.applications = new Applications(db);
        this.spareCount = spareCount;
        rewriteNodes();
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    private void rewriteNodes() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (State state : State.values()) {
            List<Node> nodes = db.readNodes(state);
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /** Returns the curator database client used by this */
    public CuratorDatabaseClient database() { return db; }

    /** @return The name resolver used to resolve hostname and ip addresses */
    public NameResolver nameResolver() { return nameResolver; }

    /** Returns the OS versions to use for nodes in this */
    public OsVersions osVersions() { return osVersions; }

    /** Returns the infrastructure versions to use for nodes in this */
    public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

    /** Returns the status of firmware checks for hosts managed by this. */
    public FirmwareChecks firmwareChecks() { return firmwareChecks; }

    /** Returns the docker images to use for nodes in this. */
    public ContainerImages containerImages() { return containerImages; }

    /** Returns the status of maintenance jobs managed by this. */
    public JobControl jobControl() { return jobControl; }

    /** Returns this node repo's view of the applications deployed to it */
    public Applications applications() { return applications; }

    public NodeFlavors flavors() { return flavors; }

    public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

    /** The number of nodes we should ensure has free capacity for node failures whenever possible */
    public int spareCount() { return spareCount; }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> getNode(String hostname, State ... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns all nodes in any of the given states.
     *
     * @param inState the states to return nodes from. If no states are given, all nodes are returned
     * @return the nodes found in any of the given states
     */
    public List<Node> getNodes(State ... inState) {
        return new ArrayList<>(db.readNodes(inState));
    }

    /**
     * Finds and returns the nodes of the given type in any of the given states.
     *
     * @param type the node type to return
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the nodes of the given type found in any of the given states
     */
    public List<Node> getNodes(NodeType type, State ... inState) {
        return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
    }

    /** Returns a filterable list of nodes in this repository in any of the given states */
    public NodeList list(State ... inState) {
        return NodeList.copyOf(getNodes(inState));
    }

    /** Returns a filterable list of the given application's nodes in any of the given states */
    public NodeList list(ApplicationId application, State ... inState) {
        return NodeList.copyOf(getNodes(application, inState));
    }

    /** Returns a filterable list of all nodes of an application */
    public NodeList list(ApplicationId application) {
        return NodeList.copyOf(getNodes(application));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(getNodes(), lock);
    }

    /** Returns a filterable list of all load balancers in this repository */
    public LoadBalancerList loadBalancers() {
        return loadBalancers((ignored) -> true);
    }

    /** Returns a filterable list of load balancers belonging to given application */
    public LoadBalancerList loadBalancers(ApplicationId application) {
        return loadBalancers((id) -> id.application().equals(application));
    }

    private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
        return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
    }

    public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }
    public List<Node> getInactive() { return db.readNodes(State.inactive); }
    public List<Node> getFailed() { return db.readNodes(State.failed); }

    /**
     * Returns the ACL for the node (trusted nodes, networks and ports)
     */
    private NodeAcl getNodeAcl(Node node, NodeList candidates) {
        // TreeSet by hostname gives a stable, deduplicated ordering of trusted nodes
        Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
        Set<Integer> trustedPorts = new LinkedHashSet<>();
        Set<String> trustedNetworks = new LinkedHashSet<>();

        // SSH is trusted for all node types
        trustedPorts.add(22);

        // The parent host of a child node is always trusted
        candidates.parentOf(node).ifPresent(trustedNodes::add);
        node.allocation().ifPresent(allocation -> {
            // Trust all nodes of the same application, and the networks of its load balancers
            trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
            loadBalancers(allocation.owner()).asList().stream()
                                             .map(LoadBalancer::instance)
                                             .map(LoadBalancerInstance::networks)
                                             .forEach(trustedNetworks::addAll);
        });

        switch (node.type()) {
            case tenant:
                // Tenant nodes trust the infrastructure, hosts of their application's nodes,
                // and (while ready) all other tenant nodes
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
                node.allocation().ifPresent(allocation ->
                        trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
                if (node.state() == State.ready) {
                    trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
                }
                break;
            case config:
                // Config servers trust all nodes, plus their config port
                trustedNodes.addAll(candidates.asList());
                trustedPorts.add(4443);
                break;
            case proxy:
                // Proxies trust config servers and open their serving ports
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedPorts.add(443);
                trustedPorts.add(4080);
                trustedPorts.add(4443);
                break;
            case controller:
                trustedPorts.add(4443);
                trustedPorts.add(443);
                trustedPorts.add(80);
                break;
            default:
                illegal("Don't know how to create ACL for " + node + " of type " + node.type());
        }

        return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
    }

    /**
     * Creates a list of node ACLs which identify which nodes the given node should trust
     *
     * @param node Node for which to generate ACLs
     * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
     * @return List of node ACLs
     */
    public List<NodeAcl> getNodeAcls(Node node, boolean children) {
        NodeList candidates = list();
        if (children) {
            return candidates.childrenOf(node).asList().stream()
                             .map(childNode -> getNodeAcl(childNode, candidates))
                             .collect(Collectors.toUnmodifiableList());
        }
        return List.of(getNodeAcl(node, candidates));
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(State.active);
        if (activeNodes.size() <= 5) return true; // Too few nodes to draw a conclusion
        NodeList downNodes = activeNodes.down();
        // Healthy when no more than 20% of active nodes are down
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
    public List<Node> addDockerNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
                illegal("Cannot add " + node + ": This is not a docker node");
            if ( ! node.allocation().isPresent())
                illegal("Cannot add " + node + ": Docker containers needs to be allocated");
            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd =  new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates within the argument list itself
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = getNode(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Carry over history, reports, fail count and firmware status from the deprovisioned node
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    nodesToRemove.add(existing.get());
                }

                nodesToAdd.add(node);
            }
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
            db.removeNodes(nodesToRemove);
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != State.provisioned && node.state() != State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty())
                            illegal("Can not set host " + node + " ready. Its IP address pool is empty.");
                        return node.withWantToRetire(false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
        if (nodeToReady.state() == State.ready) return nodeToReady;
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
                                             .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested());
    }

    /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */
    public void remove(ApplicationTransaction transaction) {
        NodeList applicationNodes = list(transaction.application());
        NodeList activeNodes = applicationNodes.state(State.active);
        deactivate(activeNodes.asList(), transaction);
        db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system,
                   Optional.of("Application is removed"), transaction.nested());
        applications.remove(transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
    }

    /**
     * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     *
     * @throws IllegalArgumentException if the node has hardware failure
     */
    public Node setDirty(Node node, Agent agent, String reason) {
        return db.writeTo(State.dirty, node, agent, Optional.of(reason));
    }

    public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = getNode(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
        // For hosts, also dirty all their children; skip anything already dirty
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                        Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                        Stream.of(nodeToDirty))
                .filter(node -> node.state() != State.dirty)
                .collect(Collectors.toList());
        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != State.provisioned)
                .filter(node -> node.state() != State.failed)
                .filter(node -> node.state() != State.parked)
                .filter(node -> node.state() != State.breakfixed)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
        return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.failed, agent, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     *
     * @return List of all the failed nodes in their new state
     */
    public List<Node> failRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.active, agent, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            List<Node> removed = removeChildren(node, false);
            removed.add(move(node, State.breakfixed, agent, Optional.of(reason)));
            return removed;
        }
    }

    private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child, toState, agent, reason))
                                 .collect(Collectors.toList());
        moved.add(move(hostname, true, toState, agent, reason));
        return moved;
    }

    private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }
        return move(node, toState, agent, reason);
    }

    private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
        if (toState == Node.State.active && node.allocation().isEmpty())
            illegal("Could not set " + node + " active. It has no allocation.");

        try (Mutex lock = lock(node)) {
            if (toState == State.active) {
                // Guard against two active nodes with the same cluster and index
                for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            return db.writeTo(toState, node, agent, reason);
        }
    }

    /**
     * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == State.ready) return node;

        // A child cannot be readied if its parent host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);

            if (node.type().isHost()) {
                List<Node> removed = removeChildren(node, force);
                // Dynamically provisioned hosts (and non-"host" host types) are deleted outright;
                // statically provisioned hosts are kept as deprovisioned, stripped of their IP config
                if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                    db.removeNodes(List.of(node));
                else {
                    node = node.with(IP.Config.EMPTY);
                    move(node, State.deprovisioned, Agent.system, Optional.empty());
                }
                removed.add(node);
                return removed;
            }
            else {
                List<Node> removed = List.of(node);
                db.removeNodes(removed);
                return removed;
            }
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        db.removeNodes(List.of(node));
    }

    private List<Node> removeChildren(Node node, boolean force) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node: node is unallocated
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *      If only removing the container node: node in state ready
     *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent())
            illegal(node + " is currently allocated and cannot be removed");

        if (!node.type().isHost() && !removingAsChild) {
            if (node.state() != State.ready)
                illegal(node + " can not be removed as it is not in the state " + State.ready);
        }
        else if (!node.type().isHost()) { // removing a child node, as part of removing its parent
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
            if ( ! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
        else { // a host
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
            if (! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone().getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone());
        }

        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }

        Set<State> legalStates = EnumSet.of(State.failed, State.parked);
        if (! legalStates.contains(node.state())) {
            illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(NodeFilter filter) {
        return performOn(StateFilter.from(State.active, filter),
                         (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(NodeFilter filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param filter the filter determining the set of nodes where the operation will be performed
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
        // Partition matching nodes by which lock protects them
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
        for (Node node : db.readNodes()) {
            if ( ! filter.matches(node)) continue;
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        // Perform the action under the right lock, re-reading each node under that lock first
        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding lock
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding lock
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
        if ( ! host.type().canRun(NodeType.tenant)) return false;
        if (host.status().wantToRetire()) return false;
        if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
        // With dynamic provisioning, hosts not yet active may also receive allocations
        if (dynamicProvisioning)
            return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
        else
            return host.state() == State.active;
    }

    /** Returns the time keeper of this system */
    public Clock clock() { return clock; }

    /** Returns the zone of this system */
    public Zone zone() { return zone; }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) {
        return getNode(hostname).flatMap(this::lockAndGet);
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname()));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname));
    }

    // The lock protecting a node is its owning application's lock when allocated,
    // otherwise the unallocated-nodes lock
    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private void illegal(String message) {
        throw new IllegalArgumentException(message);
    }

}
/**
 * The top-level service of the node repository: stores all nodes in a zone and manages their
 * lifecycle state. Node state is persisted through the CuratorDatabaseClient; callers mutating
 * nodes are expected to hold the relevant application or unallocated lock (see the lock methods
 * near the bottom of this class).
 */
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions.
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of firmware
checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } /** Returns the available node flavors */ public NodeFlavors flavors() { return flavors; } /** Returns the calculator deciding the real resources of hosts */ public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ...
inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); }
/** Returns a filterable list of the given application's nodes in any of the given states */
public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); }
/** Returns the nodes allocated to the given application, in any of the given states */
public List<Node> getNodes(ApplicationId id, State ...
inState) { return db.readNodes(id, inState); }
/** Returns all nodes in the inactive state */
public List<Node> getInactive() { return db.readNodes(State.inactive); }
/** Returns all nodes in the failed state */
public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); /* SSH is trusted for all node types */ trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g.
containers on a Docker host) * @return List of node ACLs */ public List<NodeAcl> getNodeAcls(Node node, boolean children) { NodeList candidates = list(); if (children) { return candidates.childrenOf(node).asList().stream() .map(childNode -> getNodeAcl(childNode, candidates)) .collect(Collectors.toUnmodifiableList()); } return List.of(getNodeAcl(node, candidates)); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; /* too few nodes to draw a conclusion from */ NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if ( ! node.allocation().isPresent()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node.
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready.
Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } }
/** Sets the node with this hostname ready, unless it already is */
public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested()); } /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty.
*/ public void remove(ApplicationTransaction transaction) { NodeList applicationNodes = list(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system, Optional.of("Application is removed"), transaction.nested()); applications.remove(transaction); } /** Move nodes to the dirty state */ public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason)); } /** * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. * * @throws IllegalArgumentException if the node has hardware failure */ public Node setDirty(Node node, Agent agent, String reason) { return db.writeTo(State.dirty, node, agent, Optional.of(reason)); }
/** Moves the node with this hostname, and (for hosts) any children, to dirty. Throws if any of them are in a state which does not allow this. */
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( !
hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList()); } /** * Fails this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason)); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children.
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, node, agent, reason); } } /** * This method is used by the REST API to handle readying nodes for new allocations.
For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. * * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); }
/** As {@link #removeRecursively(String)}, but with an explicit node, and optionally forcing removal past the usual state checks */
public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node.
This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed.
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( !
filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; }
/** Returns whether the given host is in a state where new tenant nodes can be allocated to it */
public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( !
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname())); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } private Mutex lock(Node node) { return node.allocation().isPresent() ?
lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
This makes a double copy of the list; consider collecting with `Collectors.toUnmodifiableList()` instead.
/** Returns the node held by each mutex in this, as an unmodifiable list. */
public List<Node> nodes() {
    // Collect directly into an unmodifiable list instead of copying twice
    // (stream -> mutable list via Collectors.toList() -> second copy via List.copyOf).
    return nodes.values().stream().map(NodeMutex::node).collect(Collectors.toUnmodifiableList());
}
return List.copyOf(nodes.values().stream().map(NodeMutex::node).collect(Collectors.toList()));
/** Returns the node currently held by each mutex in this. */
public List<Node> nodes() {
    var snapshot = nodes.values().stream()
                        .map(mutex -> mutex.node())
                        .collect(Collectors.toList());
    return snapshot;
}
/**
 * Holds the mutex and current state of a node and, lazily, its children, allowing the wrapped
 * nodes to be patched in place. Closing this closes all the underlying node mutexes.
 */
class PatchedNodes implements Mutex {
    // All held node mutexes, keyed by hostname; always contains the node itself.
    private final Map<String, NodeMutex> nodes = new HashMap<>();
    private final String hostname;
    // Whether the children of this node have been locked and added to 'nodes'.
    private boolean fetchedChildren;

    private PatchedNodes(NodeMutex nodeMutex) {
        this.hostname = nodeMutex.node().hostname();
        nodes.put(hostname, nodeMutex);
        // Only hosts have children, so for non-hosts there is nothing to fetch lazily.
        fetchedChildren = !nodeMutex.node().type().isHost();
    }

    public NodeMutex nodeMutex() {
        return nodes.get(hostname);
    }

    public Node node() {
        return nodeMutex().node();
    }

    /** Returns the (non-host) children of this node, locking and caching them on first access. */
    public List<Node> children() {
        if (!fetchedChildren) {
            memoizedNodes.get()
                    .childrenOf(hostname)
                    .forEach(node -> nodeRepository.lockAndGet(node)
                            .ifPresent(nodeMutex -> nodes.put(nodeMutex.node().hostname(), nodeMutex)));
            fetchedChildren = true;
        }
        return nodes.values().stream()
                .map(NodeMutex::node)
                .filter(node -> !node.type().isHost())
                .collect(Collectors.toList());
    }

    /** Replaces the stored state of a node which is already held by this. */
    public void update(Node node) {
        NodeMutex currentNodeMutex = nodes.get(node.hostname());
        if (currentNodeMutex == null) {
            throw new IllegalStateException("unable to update non-existing child: " + node.hostname());
        }
        nodes.put(node.hostname(), currentNodeMutex.with(node));
    }

    @Override
    public void close() {
        nodes.values().forEach(NodeMutex::close);
    }
}
class PatchedNodes implements Mutex { private final Map<String, NodeMutex> nodes = new HashMap<>(); private final String hostname; private boolean fetchedChildren; private PatchedNodes(NodeMutex nodeMutex) { this.hostname = nodeMutex.node().hostname(); nodes.put(hostname, nodeMutex); fetchedChildren = !nodeMutex.node().type().isHost(); } public NodeMutex nodeMutex() { return nodes.get(hostname); } public Node node() { return nodeMutex().node(); } public List<Node> children() { if (!fetchedChildren) { memoizedNodes.get() .childrenOf(hostname) .forEach(node -> nodeRepository.lockAndGet(node) .ifPresent(nodeMutex -> nodes.put(nodeMutex.node().hostname(), nodeMutex))); fetchedChildren = true; } return nodes.values().stream() .map(NodeMutex::node) .filter(node -> !node.type().isHost()) .collect(Collectors.toList()); } public void update(Node node) { NodeMutex currentNodeMutex = nodes.get(node.hostname()); if (currentNodeMutex == null) { throw new IllegalStateException("unable to update non-existing child: " + node.hostname()); } nodes.put(node.hostname(), currentNodeMutex.with(node)); } @Override public void close() { nodes.values().forEach(NodeMutex::close); } }