comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
If a is != 0 and b is 0 I want it to throw as something is wrong.
private static double divide(double a, double b) { if (a == 0 && b == 0) return 0; return a / b; }
if (a == 0 && b == 0) return 0;
private static double divide(double a, double b) { if (a == 0 && b == 0) return 0; return a / b; }
class NodeRepoStats { private final Load load; private final Load activeLoad; private NodeRepoStats(Load load, Load activeLoad) { this.load = load; this.activeLoad = activeLoad; } /** * Returns the current average work-extracting utilization in this node repo over all nodes. * Capacity not allocated to active nodes are taken to have 0 utilization as it provides no useful work. */ public Load load() { return load; } /** Returns the current average utilization in this node repo over all active nodes. */ public Load activeLoad() { return activeLoad; } public static NodeRepoStats computeOver(NodeRepository nodeRepository) { NodeList allNodes = nodeRepository.nodes().list(); NodeResources totalActiveResources = new NodeResources(0, 0, 0, 0); double cpu = 0, memory = 0, disk = 0; for (var nodeTimeseries : nodeRepository.metricsDb().getNodeTimeseries(Duration.ofHours(1), Set.of())) { Optional<Node> node = allNodes.node(nodeTimeseries.hostname()); if (node.isEmpty() || node.get().state() != Node.State.active) continue; Optional<NodeMetricSnapshot> snapshot = nodeTimeseries.last(); if (snapshot.isEmpty()) continue; cpu += snapshot.get().cpu() * node.get().resources().vcpu(); memory += snapshot.get().memory() * node.get().resources().memoryGb(); disk += snapshot.get().disk() * node.get().resources().diskGb(); totalActiveResources = totalActiveResources.add(node.get().resources().justNumbers()); } NodeResources totalHostResources = new NodeResources(0, 0, 0, 0); for (var host : allNodes.hosts()) { totalHostResources = totalHostResources.add(host.resources().justNumbers()); } Load load = new Load(divide(cpu, totalHostResources.vcpu()), divide(memory, totalHostResources.memoryGb()), divide(disk, totalHostResources.diskGb())); Load activeLoad = new Load(divide(cpu, totalActiveResources.vcpu()), divide(memory, totalActiveResources.memoryGb()), divide(disk, totalActiveResources.diskGb())); return new NodeRepoStats(load, activeLoad); } }
class NodeRepoStats { private final Load load; private final Load activeLoad; private NodeRepoStats(Load load, Load activeLoad) { this.load = load; this.activeLoad = activeLoad; } /** * Returns the current average work-extracting utilization in this node repo over all nodes. * Capacity not allocated to active nodes are taken to have 0 utilization as it provides no useful work. */ public Load load() { return load; } /** Returns the current average utilization in this node repo over all active nodes. */ public Load activeLoad() { return activeLoad; } public static NodeRepoStats computeOver(NodeRepository nodeRepository) { NodeList allNodes = nodeRepository.nodes().list(); NodeResources totalActiveResources = new NodeResources(0, 0, 0, 0); double cpu = 0, memory = 0, disk = 0; for (var nodeTimeseries : nodeRepository.metricsDb().getNodeTimeseries(Duration.ofHours(1), Set.of())) { Optional<Node> node = allNodes.node(nodeTimeseries.hostname()); if (node.isEmpty() || node.get().state() != Node.State.active) continue; Optional<NodeMetricSnapshot> snapshot = nodeTimeseries.last(); if (snapshot.isEmpty()) continue; cpu += snapshot.get().cpu() * node.get().resources().vcpu(); memory += snapshot.get().memory() * node.get().resources().memoryGb(); disk += snapshot.get().disk() * node.get().resources().diskGb(); totalActiveResources = totalActiveResources.add(node.get().resources().justNumbers()); } NodeResources totalHostResources = new NodeResources(0, 0, 0, 0); for (var host : allNodes.hosts()) { totalHostResources = totalHostResources.add(host.resources().justNumbers()); } Load load = new Load(divide(cpu, totalHostResources.vcpu()), divide(memory, totalHostResources.memoryGb()), divide(disk, totalHostResources.diskGb())); Load activeLoad = new Load(divide(cpu, totalActiveResources.vcpu()), divide(memory, totalActiveResources.memoryGb()), divide(disk, totalActiveResources.diskGb())); return new NodeRepoStats(load, activeLoad); } }
That would be a measure of "waste", not "utilization". That could be useful too. I didn't really consider it, but now I'm going to. Utilization is also useful though, (more interpretable, and it could be that the 0.4 number is wrong), so I suggest this can be merged.
public static NodeRepoStats computeOver(NodeRepository nodeRepository) { NodeList allNodes = nodeRepository.nodes().list(); NodeResources totalActiveResources = new NodeResources(0, 0, 0, 0); double cpu = 0, memory = 0, disk = 0; for (var nodeTimeseries : nodeRepository.metricsDb().getNodeTimeseries(Duration.ofHours(1), Set.of())) { Optional<Node> node = allNodes.node(nodeTimeseries.hostname()); if (node.isEmpty() || node.get().state() != Node.State.active) continue; Optional<NodeMetricSnapshot> snapshot = nodeTimeseries.last(); if (snapshot.isEmpty()) continue; cpu += snapshot.get().cpu() * node.get().resources().vcpu(); memory += snapshot.get().memory() * node.get().resources().memoryGb(); disk += snapshot.get().disk() * node.get().resources().diskGb(); totalActiveResources = totalActiveResources.add(node.get().resources().justNumbers()); } NodeResources totalHostResources = new NodeResources(0, 0, 0, 0); for (var host : allNodes.hosts()) { totalHostResources = totalHostResources.add(host.resources().justNumbers()); } Load load = new Load(divide(cpu, totalHostResources.vcpu()), divide(memory, totalHostResources.memoryGb()), divide(disk, totalHostResources.diskGb())); Load activeLoad = new Load(divide(cpu, totalActiveResources.vcpu()), divide(memory, totalActiveResources.memoryGb()), divide(disk, totalActiveResources.diskGb())); return new NodeRepoStats(load, activeLoad); }
cpu += snapshot.get().cpu() * node.get().resources().vcpu();
public static NodeRepoStats computeOver(NodeRepository nodeRepository) { NodeList allNodes = nodeRepository.nodes().list(); NodeResources totalActiveResources = new NodeResources(0, 0, 0, 0); double cpu = 0, memory = 0, disk = 0; for (var nodeTimeseries : nodeRepository.metricsDb().getNodeTimeseries(Duration.ofHours(1), Set.of())) { Optional<Node> node = allNodes.node(nodeTimeseries.hostname()); if (node.isEmpty() || node.get().state() != Node.State.active) continue; Optional<NodeMetricSnapshot> snapshot = nodeTimeseries.last(); if (snapshot.isEmpty()) continue; cpu += snapshot.get().cpu() * node.get().resources().vcpu(); memory += snapshot.get().memory() * node.get().resources().memoryGb(); disk += snapshot.get().disk() * node.get().resources().diskGb(); totalActiveResources = totalActiveResources.add(node.get().resources().justNumbers()); } NodeResources totalHostResources = new NodeResources(0, 0, 0, 0); for (var host : allNodes.hosts()) { totalHostResources = totalHostResources.add(host.resources().justNumbers()); } Load load = new Load(divide(cpu, totalHostResources.vcpu()), divide(memory, totalHostResources.memoryGb()), divide(disk, totalHostResources.diskGb())); Load activeLoad = new Load(divide(cpu, totalActiveResources.vcpu()), divide(memory, totalActiveResources.memoryGb()), divide(disk, totalActiveResources.diskGb())); return new NodeRepoStats(load, activeLoad); }
class NodeRepoStats { private final Load load; private final Load activeLoad; private NodeRepoStats(Load load, Load activeLoad) { this.load = load; this.activeLoad = activeLoad; } /** * Returns the current average work-extracting utilization in this node repo over all nodes. * Capacity not allocated to active nodes are taken to have 0 utilization as it provides no useful work. */ public Load load() { return load; } /** Returns the current average utilization in this node repo over all active nodes. */ public Load activeLoad() { return activeLoad; } private static double divide(double a, double b) { if (a == 0 && b == 0) return 0; return a / b; } }
class NodeRepoStats { private final Load load; private final Load activeLoad; private NodeRepoStats(Load load, Load activeLoad) { this.load = load; this.activeLoad = activeLoad; } /** * Returns the current average work-extracting utilization in this node repo over all nodes. * Capacity not allocated to active nodes are taken to have 0 utilization as it provides no useful work. */ public Load load() { return load; } /** Returns the current average utilization in this node repo over all active nodes. */ public Load activeLoad() { return activeLoad; } private static double divide(double a, double b) { if (a == 0 && b == 0) return 0; return a / b; } }
We should consider adding a method to list zones including the controller zone so we avoid having to do this concat everywhere
private HttpResponse root(HttpRequest request) { Slime slime = new Slime(); Cursor root = slime.setObject(); ZoneList zoneList = zoneRegistry.zones().reachable(); Cursor zones = root.setArray("zones"); Stream.concat(Stream.of(controllerZone), zoneRegistry.zones().reachable().ids().stream()) .forEach(zone -> { Cursor object = zones.addObject(); object.setString("environment", zone.environment().value()); object.setString("region", zone.region().value()); object.setString("uri", request.getUri().resolve( "/configserver/v1/" + zone.environment().value() + "/" + zone.region().value()).toString()); }); return new SlimeJsonResponse(slime); }
Stream.concat(Stream.of(controllerZone), zoneRegistry.zones().reachable().ids().stream())
private HttpResponse root(HttpRequest request) { Slime slime = new Slime(); Cursor root = slime.setObject(); ZoneList zoneList = zoneRegistry.zones().reachable(); Cursor zones = root.setArray("zones"); Stream.concat(Stream.of(controllerZone), zoneRegistry.zones().reachable().ids().stream()) .forEach(zone -> { Cursor object = zones.addObject(); object.setString("environment", zone.environment().value()); object.setString("region", zone.region().value()); object.setString("uri", request.getUri().resolve( "/configserver/v1/" + zone.environment().value() + "/" + zone.region().value()).toString()); }); return new SlimeJsonResponse(slime); }
class ConfigServerApiHandler extends AuditLoggingRequestHandler { private static final URI CONTROLLER_URI = URI.create("https: private static final List<String> WHITELISTED_APIS = List.of("/flags/v1/", "/nodes/v2/", "/orchestrator/v1/"); private final ZoneRegistry zoneRegistry; private final ConfigServerRestExecutor proxy; private final ZoneId controllerZone; public ConfigServerApiHandler(Context parentCtx, ServiceRegistry serviceRegistry, ConfigServerRestExecutor proxy, Controller controller) { super(parentCtx, controller.auditLogger()); this.zoneRegistry = serviceRegistry.zoneRegistry(); this.controllerZone = zoneRegistry.systemZone().getVirtualId(); this.proxy = proxy; } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return get(request); case POST: case PUT: case DELETE: case PATCH: return proxy(request); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is unsupported"); } } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "', " + Exceptions.toMessageString(e)); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse get(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/configserver/v1")) { return root(request); } return proxy(request); } private HttpResponse proxy(HttpRequest request) { Path path = new Path(request.getUri()); if ( ! path.matches("/configserver/v1/{environment}/{region}/{*}")) { return ErrorResponse.notFoundError("Nothing at " + path); } ZoneId zoneId = ZoneId.from(path.get("environment"), path.get("region")); if (! zoneRegistry.hasZone(zoneId) && ! 
controllerZone.equals(zoneId)) { throw new IllegalArgumentException("No such zone: " + zoneId.value()); } String cfgPath = "/" + path.getRest(); if (WHITELISTED_APIS.stream().noneMatch(cfgPath::startsWith)) { return ErrorResponse.forbidden("Cannot access '" + cfgPath + "' through /configserver/v1, following APIs are permitted: " + String.join(", ", WHITELISTED_APIS)); } return proxy.handle(ProxyRequest.tryOne(getEndpoint(zoneId), cfgPath, request)); } private HttpResponse notFound(Path path) { return ErrorResponse.notFoundError("Nothing at " + path); } private URI getEndpoint(ZoneId zoneId) { return controllerZone.equals(zoneId) ? CONTROLLER_URI : zoneRegistry.getConfigServerVipUri(zoneId); } }
class ConfigServerApiHandler extends AuditLoggingRequestHandler { private static final URI CONTROLLER_URI = URI.create("https: private static final List<String> WHITELISTED_APIS = List.of("/flags/v1/", "/nodes/v2/", "/orchestrator/v1/"); private final ZoneRegistry zoneRegistry; private final ConfigServerRestExecutor proxy; private final ZoneId controllerZone; public ConfigServerApiHandler(Context parentCtx, ServiceRegistry serviceRegistry, ConfigServerRestExecutor proxy, Controller controller) { super(parentCtx, controller.auditLogger()); this.zoneRegistry = serviceRegistry.zoneRegistry(); this.controllerZone = zoneRegistry.systemZone().getVirtualId(); this.proxy = proxy; } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return get(request); case POST: case PUT: case DELETE: case PATCH: return proxy(request); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is unsupported"); } } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "', " + Exceptions.toMessageString(e)); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse get(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/configserver/v1")) { return root(request); } return proxy(request); } private HttpResponse proxy(HttpRequest request) { Path path = new Path(request.getUri()); if ( ! path.matches("/configserver/v1/{environment}/{region}/{*}")) { return ErrorResponse.notFoundError("Nothing at " + path); } ZoneId zoneId = ZoneId.from(path.get("environment"), path.get("region")); if (! zoneRegistry.hasZone(zoneId) && ! 
controllerZone.equals(zoneId)) { throw new IllegalArgumentException("No such zone: " + zoneId.value()); } String cfgPath = "/" + path.getRest(); if (WHITELISTED_APIS.stream().noneMatch(cfgPath::startsWith)) { return ErrorResponse.forbidden("Cannot access '" + cfgPath + "' through /configserver/v1, following APIs are permitted: " + String.join(", ", WHITELISTED_APIS)); } return proxy.handle(ProxyRequest.tryOne(getEndpoint(zoneId), cfgPath, request)); } private HttpResponse notFound(Path path) { return ErrorResponse.notFoundError("Nothing at " + path); } private URI getEndpoint(ZoneId zoneId) { return controllerZone.equals(zoneId) ? CONTROLLER_URI : zoneRegistry.getConfigServerVipUri(zoneId); } }
Yes, I thought about, but it wasn't immediately obvious how to best add this to `ZoneList`. Will do it in a future PR.
private HttpResponse root(HttpRequest request) { Slime slime = new Slime(); Cursor root = slime.setObject(); ZoneList zoneList = zoneRegistry.zones().reachable(); Cursor zones = root.setArray("zones"); Stream.concat(Stream.of(controllerZone), zoneRegistry.zones().reachable().ids().stream()) .forEach(zone -> { Cursor object = zones.addObject(); object.setString("environment", zone.environment().value()); object.setString("region", zone.region().value()); object.setString("uri", request.getUri().resolve( "/configserver/v1/" + zone.environment().value() + "/" + zone.region().value()).toString()); }); return new SlimeJsonResponse(slime); }
Stream.concat(Stream.of(controllerZone), zoneRegistry.zones().reachable().ids().stream())
private HttpResponse root(HttpRequest request) { Slime slime = new Slime(); Cursor root = slime.setObject(); ZoneList zoneList = zoneRegistry.zones().reachable(); Cursor zones = root.setArray("zones"); Stream.concat(Stream.of(controllerZone), zoneRegistry.zones().reachable().ids().stream()) .forEach(zone -> { Cursor object = zones.addObject(); object.setString("environment", zone.environment().value()); object.setString("region", zone.region().value()); object.setString("uri", request.getUri().resolve( "/configserver/v1/" + zone.environment().value() + "/" + zone.region().value()).toString()); }); return new SlimeJsonResponse(slime); }
class ConfigServerApiHandler extends AuditLoggingRequestHandler { private static final URI CONTROLLER_URI = URI.create("https: private static final List<String> WHITELISTED_APIS = List.of("/flags/v1/", "/nodes/v2/", "/orchestrator/v1/"); private final ZoneRegistry zoneRegistry; private final ConfigServerRestExecutor proxy; private final ZoneId controllerZone; public ConfigServerApiHandler(Context parentCtx, ServiceRegistry serviceRegistry, ConfigServerRestExecutor proxy, Controller controller) { super(parentCtx, controller.auditLogger()); this.zoneRegistry = serviceRegistry.zoneRegistry(); this.controllerZone = zoneRegistry.systemZone().getVirtualId(); this.proxy = proxy; } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return get(request); case POST: case PUT: case DELETE: case PATCH: return proxy(request); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is unsupported"); } } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "', " + Exceptions.toMessageString(e)); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse get(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/configserver/v1")) { return root(request); } return proxy(request); } private HttpResponse proxy(HttpRequest request) { Path path = new Path(request.getUri()); if ( ! path.matches("/configserver/v1/{environment}/{region}/{*}")) { return ErrorResponse.notFoundError("Nothing at " + path); } ZoneId zoneId = ZoneId.from(path.get("environment"), path.get("region")); if (! zoneRegistry.hasZone(zoneId) && ! 
controllerZone.equals(zoneId)) { throw new IllegalArgumentException("No such zone: " + zoneId.value()); } String cfgPath = "/" + path.getRest(); if (WHITELISTED_APIS.stream().noneMatch(cfgPath::startsWith)) { return ErrorResponse.forbidden("Cannot access '" + cfgPath + "' through /configserver/v1, following APIs are permitted: " + String.join(", ", WHITELISTED_APIS)); } return proxy.handle(ProxyRequest.tryOne(getEndpoint(zoneId), cfgPath, request)); } private HttpResponse notFound(Path path) { return ErrorResponse.notFoundError("Nothing at " + path); } private URI getEndpoint(ZoneId zoneId) { return controllerZone.equals(zoneId) ? CONTROLLER_URI : zoneRegistry.getConfigServerVipUri(zoneId); } }
class ConfigServerApiHandler extends AuditLoggingRequestHandler { private static final URI CONTROLLER_URI = URI.create("https: private static final List<String> WHITELISTED_APIS = List.of("/flags/v1/", "/nodes/v2/", "/orchestrator/v1/"); private final ZoneRegistry zoneRegistry; private final ConfigServerRestExecutor proxy; private final ZoneId controllerZone; public ConfigServerApiHandler(Context parentCtx, ServiceRegistry serviceRegistry, ConfigServerRestExecutor proxy, Controller controller) { super(parentCtx, controller.auditLogger()); this.zoneRegistry = serviceRegistry.zoneRegistry(); this.controllerZone = zoneRegistry.systemZone().getVirtualId(); this.proxy = proxy; } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return get(request); case POST: case PUT: case DELETE: case PATCH: return proxy(request); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is unsupported"); } } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "', " + Exceptions.toMessageString(e)); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse get(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/configserver/v1")) { return root(request); } return proxy(request); } private HttpResponse proxy(HttpRequest request) { Path path = new Path(request.getUri()); if ( ! path.matches("/configserver/v1/{environment}/{region}/{*}")) { return ErrorResponse.notFoundError("Nothing at " + path); } ZoneId zoneId = ZoneId.from(path.get("environment"), path.get("region")); if (! zoneRegistry.hasZone(zoneId) && ! 
controllerZone.equals(zoneId)) { throw new IllegalArgumentException("No such zone: " + zoneId.value()); } String cfgPath = "/" + path.getRest(); if (WHITELISTED_APIS.stream().noneMatch(cfgPath::startsWith)) { return ErrorResponse.forbidden("Cannot access '" + cfgPath + "' through /configserver/v1, following APIs are permitted: " + String.join(", ", WHITELISTED_APIS)); } return proxy.handle(ProxyRequest.tryOne(getEndpoint(zoneId), cfgPath, request)); } private HttpResponse notFound(Path path) { return ErrorResponse.notFoundError("Nothing at " + path); } private URI getEndpoint(ZoneId zoneId) { return controllerZone.equals(zoneId) ? CONTROLLER_URI : zoneRegistry.getConfigServerVipUri(zoneId); } }
What about combined nodes? They have containers as well
private Duration warmUpDuration(NodeAgentContext context) { ZoneApi zone = context.zone(); Optional<NodeMembership> membership = context.node().membership(); return zone.getSystemName().isCd() || zone.getEnvironment().isTest() || context.nodeType() != NodeType.tenant || membership.map(mem -> ! (mem.type().isContainer() || mem.type().isAdmin())).orElse(false) ? Duration.ofSeconds(-1) : warmUpDuration; }
|| membership.map(mem -> ! (mem.type().isContainer() || mem.type().isAdmin())).orElse(false)
private Duration warmUpDuration(NodeAgentContext context) { ZoneApi zone = context.zone(); Optional<NodeMembership> membership = context.node().membership(); return zone.getSystemName().isCd() || zone.getEnvironment().isTest() || context.nodeType() != NodeType.tenant || membership.map(mem -> ! (mem.type().isContainer() || mem.type().isAdmin())).orElse(false) ? Duration.ofSeconds(-1) : warmUpDuration; }
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final ContainerOperations containerOperations; private final RegistryCredentialsProvider registryCredentialsProvider; private final StorageMaintainer storageMaintainer; private final List<CredentialsMaintainer> credentialsMaintainers; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final Clock clock; private final Duration warmUpDuration; private final DoubleFlag containerCpuCap; private Thread loopThread; private ContainerState containerState = UNKNOWN; private NodeSpec lastNode; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty(); private boolean suspendedInOrchestrator = false; private int numberOfUnhandledException = 0; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock) { this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider, storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock, DEFAULT_WARM_UP_DURATION); } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.containerOperations = containerOperations; this.registryCredentialsProvider = registryCredentialsProvider; this.storageMaintainer = storageMaintainer; this.credentialsMaintainers = credentialsMaintainers; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.clock = clock; this.warmUpDuration = warmUpDuration; this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource); } @Override public void start(NodeAgentContext initialContext) { if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent."); loopThread = new Thread(() -> { while (!terminated.get()) { try { NodeAgentContext context = contextSupplier.nextContext(); converge(context); } catch 
(InterruptedException ignored) { } } }); loopThread.setName("tick-" + initialContext.hostname()); loopThread.start(); } @Override public void stopForRemoval(NodeAgentContext context) { if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent."); contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); context.log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Starting services"); containerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, Level.FINE, "Starting optional node program resume command"); containerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (context.node().wantedRestartGeneration().isPresent() && !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); } if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) { currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); } Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) { DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY); 
currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion()); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(newImage.tagAsVersion()); } publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private Container startContainer(NodeAgentContext context) { ContainerData containerData = createContainerData(context); ContainerResources wantedResources = warmUpDuration(context).isNegative() ? getContainerResources(context) : getContainerResources(context).withUnlimitedCpus(); containerOperations.createContainer(context, containerData, wantedResources); containerOperations.startContainer(context); currentRebootGeneration = context.node().wantedRebootGeneration(); currentRestartGeneration = context.node().wantedRestartGeneration(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just started")); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { List<String> reasons = shouldRemoveContainer(context, existingContainer.get()); if (!reasons.isEmpty()) { removeContainer(context, existingContainer.get(), reasons, false); return Optional.empty(); } shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> { 
context.log(logger, "Will restart services: " + restartReason); orchestratorSuspendNode(context); containerOperations.restartVespa(context); currentRestartGeneration = context.node().wantedRestartGeneration(); }); } return existingContainer; } private Optional<String> shouldRestartServices( NodeAgentContext context, Container existingContainer) { NodeSpec node = context.node(); if (!existingContainer.state.isRunning() || node.state() != NodeState.active) return Optional.empty(); if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get()); } return Optional.empty(); } private void stopServicesIfNeeded(NodeAgentContext context) { if (hasStartedServices && context.node().owner().isEmpty()) stopServices(context); } private void stopServices(NodeAgentContext context) { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; firstSuccessfulHealthCheckInstant = Optional.empty(); containerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void stopForHostSuspension(NodeAgentContext context) { getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true)); } public void suspend(NodeAgentContext context) { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; containerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, Level.WARNING, "Failed trying to suspend container", e); } } private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().state(); 
List<String> reasons = new ArrayList<>(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) reasons.add("Node in state " + nodeState + ", container should no longer be running"); if (context.node().wantedDockerImage().isPresent() && !context.node().wantedDockerImage().get().equals(existingContainer.image)) { reasons.add("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) reasons.add("Container no longer running"); if (currentRebootGeneration < context.node().wantedRebootGeneration()) { reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d", currentRebootGeneration, context.node().wantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources)) { reasons.add("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory()); } if (containerState == STARTING) reasons.add("Container failed to start"); return reasons; } private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) { context.log(logger, "Will remove container: " + String.join(", ", reasons)); if (existingContainer.state.isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().state() != NodeState.dirty) { suspend(context); } stopServices(context); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer)); containerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new 
containerState is " + containerState); } private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant .map(clock.instant().minus(warmUpDuration(context))::isBefore) .orElse(true)) return existingContainer; if (wantedContainerResources.equalsCpu(existingContainer.resources)) return existingContainer; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu()); containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources.memoryBytes())); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just updated")); } private ContainerResources getContainerResources(NodeAgentContext context) { double cpuCap = noCpuCap(context.zone()) ? 
0 : context.node().owner() .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())) .orElse(containerCpuCap) .with(FetchVector.Dimension.HOSTNAME, context.node().hostname()) .value() * context.vcpuOnThisHost(); return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev || zone.getSystemName().isCd(); } private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) { NodeSpec node = context.node(); if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false; RegistryCredentials credentials = registryCredentialsProvider.get(); return node.wantedDockerImage() .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials)) .orElse(false); } public void converge(NodeAgentContext context) { try { doConverge(context); context.log(logger, Level.INFO, "Converged"); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, Level.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Caught a DockerException", e); } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e); } } void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() || currentRestartGeneration.map(current -> current < 
node.currentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.currentRestartGeneration(); lastNode = node; } switch (node.state()) { case ready: case reserved: case failed: case inactive: case parked: removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context); stopServicesIfNeeded(context); break; case active: storageMaintainer.syncLogs(context, true); storageMaintainer.cleanDiskIfFull(context); storageMaintainer.handleCoreDumpsForContainer(context, container); if (downloadImageIfNeeded(context, container)) { context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainers.forEach(maintainer -> maintainer.converge(context)); if (container.isEmpty()) { containerState = STARTING; container = Optional.of(startContainer(context)); containerState = UNKNOWN; } else { container = Optional.of(updateContainerIfNeeded(context, container.get())); } aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); startServicesIfNeeded(context); resumeNodeIfNeeded(context); if (healthChecker.isPresent()) { healthChecker.get().verifyHealth(context); if (firstSuccessfulHealthCheckInstant.isEmpty()) firstSuccessfulHealthCheckInstant = Optional.of(clock.instant()); Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context))); if (!container.get().resources.equalsCpu(getContainerResources(context))) throw new ConvergenceException("Refusing to resume until warm up period ends (" + (timeLeft.isNegative() ? 
"next tick" : "in " + timeLeft) + ")"); } updateNodeRepoWithCurrentAttributes(context); if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) { context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); suspendedInOrchestrator = false; } break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready"); credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.syncLogs(context, false); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw new ConvergenceException("UNKNOWN STATE " + node.state().name()); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state); if (builder.length() > 0) { context.log(logger, Level.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? 
null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = containerOperations.getContainer(context); if (container.isEmpty()) containerState = ABSENT; return container; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().state() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); suspendedInOrchestrator = true; } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected ContainerData createContainerData(NodeAgentContext context) { return new ContainerData() { @Override public void addFile(Path pathInContainer, String data) { throw new UnsupportedOperationException("addFile not implemented"); } @Override public void addDirectory(Path pathInContainer) { throw new UnsupportedOperationException("addDirectory not implemented"); } @Override public void createSymlink(Path symlink, Path target) { throw new UnsupportedOperationException("createSymlink not implemented"); } }; } protected List<CredentialsMaintainer> credentialsMaintainers() { return credentialsMaintainers; } }
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final ContainerOperations containerOperations; private final RegistryCredentialsProvider registryCredentialsProvider; private final StorageMaintainer storageMaintainer; private final List<CredentialsMaintainer> credentialsMaintainers; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final Clock clock; private final Duration warmUpDuration; private final DoubleFlag containerCpuCap; private Thread loopThread; private ContainerState containerState = UNKNOWN; private NodeSpec lastNode; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty(); private boolean suspendedInOrchestrator = false; private int numberOfUnhandledException = 0; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock) { this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider, storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock, DEFAULT_WARM_UP_DURATION); } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.containerOperations = containerOperations; this.registryCredentialsProvider = registryCredentialsProvider; this.storageMaintainer = storageMaintainer; this.credentialsMaintainers = credentialsMaintainers; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.clock = clock; this.warmUpDuration = warmUpDuration; this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource); } @Override public void start(NodeAgentContext initialContext) { if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent."); loopThread = new Thread(() -> { while (!terminated.get()) { try { NodeAgentContext context = contextSupplier.nextContext(); converge(context); } catch 
(InterruptedException ignored) { } } }); loopThread.setName("tick-" + initialContext.hostname()); loopThread.start(); } @Override public void stopForRemoval(NodeAgentContext context) { if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent."); contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); context.log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Starting services"); containerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, Level.FINE, "Starting optional node program resume command"); containerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (context.node().wantedRestartGeneration().isPresent() && !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); } if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) { currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); } Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) { DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY); 
currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion()); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(newImage.tagAsVersion()); } publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private Container startContainer(NodeAgentContext context) { ContainerData containerData = createContainerData(context); ContainerResources wantedResources = warmUpDuration(context).isNegative() ? getContainerResources(context) : getContainerResources(context).withUnlimitedCpus(); containerOperations.createContainer(context, containerData, wantedResources); containerOperations.startContainer(context); currentRebootGeneration = context.node().wantedRebootGeneration(); currentRestartGeneration = context.node().wantedRestartGeneration(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just started")); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { List<String> reasons = shouldRemoveContainer(context, existingContainer.get()); if (!reasons.isEmpty()) { removeContainer(context, existingContainer.get(), reasons, false); return Optional.empty(); } shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> { 
context.log(logger, "Will restart services: " + restartReason); orchestratorSuspendNode(context); containerOperations.restartVespa(context); currentRestartGeneration = context.node().wantedRestartGeneration(); }); } return existingContainer; } private Optional<String> shouldRestartServices( NodeAgentContext context, Container existingContainer) { NodeSpec node = context.node(); if (!existingContainer.state.isRunning() || node.state() != NodeState.active) return Optional.empty(); if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get()); } return Optional.empty(); } private void stopServicesIfNeeded(NodeAgentContext context) { if (hasStartedServices && context.node().owner().isEmpty()) stopServices(context); } private void stopServices(NodeAgentContext context) { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; firstSuccessfulHealthCheckInstant = Optional.empty(); containerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void stopForHostSuspension(NodeAgentContext context) { getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true)); } public void suspend(NodeAgentContext context) { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; containerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, Level.WARNING, "Failed trying to suspend container", e); } } private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().state(); 
List<String> reasons = new ArrayList<>(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) reasons.add("Node in state " + nodeState + ", container should no longer be running"); if (context.node().wantedDockerImage().isPresent() && !context.node().wantedDockerImage().get().equals(existingContainer.image)) { reasons.add("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) reasons.add("Container no longer running"); if (currentRebootGeneration < context.node().wantedRebootGeneration()) { reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d", currentRebootGeneration, context.node().wantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources)) { reasons.add("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory()); } if (containerState == STARTING) reasons.add("Container failed to start"); return reasons; } private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) { context.log(logger, "Will remove container: " + String.join(", ", reasons)); if (existingContainer.state.isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().state() != NodeState.dirty) { suspend(context); } stopServices(context); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer)); containerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new 
containerState is " + containerState); } private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant .map(clock.instant().minus(warmUpDuration(context))::isBefore) .orElse(true)) return existingContainer; if (wantedContainerResources.equalsCpu(existingContainer.resources)) return existingContainer; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu()); containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources.memoryBytes())); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just updated")); } private ContainerResources getContainerResources(NodeAgentContext context) { double cpuCap = noCpuCap(context.zone()) ? 
0 : context.node().owner() .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())) .orElse(containerCpuCap) .with(FetchVector.Dimension.HOSTNAME, context.node().hostname()) .value() * context.vcpuOnThisHost(); return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev || zone.getSystemName().isCd(); } private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) { NodeSpec node = context.node(); if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false; RegistryCredentials credentials = registryCredentialsProvider.get(); return node.wantedDockerImage() .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials)) .orElse(false); } public void converge(NodeAgentContext context) { try { doConverge(context); context.log(logger, Level.INFO, "Converged"); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, Level.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Caught a DockerException", e); } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e); } } void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() || currentRestartGeneration.map(current -> current < 
node.currentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.currentRestartGeneration(); lastNode = node; } switch (node.state()) { case ready: case reserved: case failed: case inactive: case parked: removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context); stopServicesIfNeeded(context); break; case active: storageMaintainer.syncLogs(context, true); storageMaintainer.cleanDiskIfFull(context); storageMaintainer.handleCoreDumpsForContainer(context, container); if (downloadImageIfNeeded(context, container)) { context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainers.forEach(maintainer -> maintainer.converge(context)); if (container.isEmpty()) { containerState = STARTING; container = Optional.of(startContainer(context)); containerState = UNKNOWN; } else { container = Optional.of(updateContainerIfNeeded(context, container.get())); } aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); startServicesIfNeeded(context); resumeNodeIfNeeded(context); if (healthChecker.isPresent()) { healthChecker.get().verifyHealth(context); if (firstSuccessfulHealthCheckInstant.isEmpty()) firstSuccessfulHealthCheckInstant = Optional.of(clock.instant()); Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context))); if (!container.get().resources.equalsCpu(getContainerResources(context))) throw new ConvergenceException("Refusing to resume until warm up period ends (" + (timeLeft.isNegative() ? 
"next tick" : "in " + timeLeft) + ")"); } updateNodeRepoWithCurrentAttributes(context); if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) { context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); suspendedInOrchestrator = false; } break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready"); credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.syncLogs(context, false); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw new ConvergenceException("UNKNOWN STATE " + node.state().name()); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state); if (builder.length() > 0) { context.log(logger, Level.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? 
null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = containerOperations.getContainer(context); if (container.isEmpty()) containerState = ABSENT; return container; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().state() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); suspendedInOrchestrator = true; } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected ContainerData createContainerData(NodeAgentContext context) { return new ContainerData() { @Override public void addFile(Path pathInContainer, String data) { throw new UnsupportedOperationException("addFile not implemented"); } @Override public void addDirectory(Path pathInContainer) { throw new UnsupportedOperationException("addDirectory not implemented"); } @Override public void createSymlink(Path symlink, Path target) { throw new UnsupportedOperationException("createSymlink not implemented"); } }; } protected List<CredentialsMaintainer> credentialsMaintainers() { return credentialsMaintainers; } }
Ah, isContainer() also covers the combined node type, so that case is already handled. That's OK then.
/**
 * Returns the warm-up period that must elapse (with passing health checks) before this node's
 * CPU allocation is tightened, or a negative duration when no warm-up should be applied.
 *
 * Warm-up is skipped in CD systems, in test environments, for non-tenant nodes, and for nodes
 * whose cluster membership is neither a container nor an admin type (isContainer() reportedly
 * also covers the combined type — per review note). A node with no membership at all falls
 * through to the configured warm-up duration.
 *
 * @param context the node agent context for the node being converged
 * @return the configured warm-up duration, or a negative duration meaning "no warm-up"
 */
private Duration warmUpDuration(NodeAgentContext context) {
    ZoneApi zone = context.zone();
    boolean skipWarmUp = zone.getSystemName().isCd()
            || zone.getEnvironment().isTest()
            || context.nodeType() != NodeType.tenant
            || context.node().membership()
                      .map(membership -> !(membership.type().isContainer() || membership.type().isAdmin()))
                      .orElse(false);
    if (skipWarmUp) return Duration.ofSeconds(-1);
    return warmUpDuration;
}
|| membership.map(mem -> ! (mem.type().isContainer() || mem.type().isAdmin())).orElse(false)
/**
 * Returns the warm-up period that must elapse (with passing health checks) before this node's
 * CPU allocation is tightened, or a negative duration when no warm-up should be applied.
 *
 * Warm-up is skipped in CD systems, in test environments, for non-tenant nodes, and for nodes
 * whose cluster membership is neither a container nor an admin type (isContainer() reportedly
 * also covers the combined type — per review note; confirm against NodeMembership). An empty
 * membership maps through orElse(false), so such nodes still get the configured warm-up.
 *
 * @param context the node agent context for the node being converged
 * @return the configured warm-up duration, or a negative duration meaning "no warm-up"
 */
private Duration warmUpDuration(NodeAgentContext context) { ZoneApi zone = context.zone(); Optional<NodeMembership> membership = context.node().membership(); return zone.getSystemName().isCd() || zone.getEnvironment().isTest() || context.nodeType() != NodeType.tenant || membership.map(mem -> ! (mem.type().isContainer() || mem.type().isAdmin())).orElse(false) ? Duration.ofSeconds(-1) : warmUpDuration; }
/**
 * Node agent for a single node: runs a tick loop that pulls contexts from the supplier and
 * converges the node's container toward the state the node repository wants.
 *
 * Responsibilities visible in this class:
 *  - lifecycle of the tick thread (start / stopForRemoval);
 *  - creating, updating, removing and (re)starting the node's container via ContainerOperations,
 *    including pulling the wanted Docker image with registry credentials;
 *  - suspending/resuming the node in the Orchestrator around disruptive operations;
 *  - publishing current reboot/restart generations and Docker image/Vespa version back to the
 *    node repository when they change;
 *  - running the container with uncapped CPU during a warm-up period after start and only
 *    applying the CPU cap once the first successful health check plus warmUpDuration has passed.
 *
 * Mutable fields (containerState, generations, hasStartedServices, etc.) are only touched from
 * the tick loop — NOTE(review): single-threaded use is presumed from the loop structure; confirm
 * no other callers invoke converge() concurrently.
 */
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final ContainerOperations containerOperations; private final RegistryCredentialsProvider registryCredentialsProvider; private final StorageMaintainer storageMaintainer; private final List<CredentialsMaintainer> credentialsMaintainers; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final Clock clock; private final Duration warmUpDuration; private final DoubleFlag containerCpuCap; private Thread loopThread; private ContainerState containerState = UNKNOWN; private NodeSpec lastNode; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty(); private boolean suspendedInOrchestrator = false; private int numberOfUnhandledException = 0; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock) { this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider, storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock, DEFAULT_WARM_UP_DURATION); } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.containerOperations = containerOperations; this.registryCredentialsProvider = registryCredentialsProvider; this.storageMaintainer = storageMaintainer; this.credentialsMaintainers = credentialsMaintainers; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.clock = clock; this.warmUpDuration = warmUpDuration; this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource); } @Override public void start(NodeAgentContext initialContext) { if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent."); loopThread = new Thread(() -> { while (!terminated.get()) { try { NodeAgentContext context = contextSupplier.nextContext(); converge(context); } catch 
(InterruptedException ignored) { } } }); loopThread.setName("tick-" + initialContext.hostname()); loopThread.start(); } @Override public void stopForRemoval(NodeAgentContext context) { if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent."); contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); context.log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Starting services"); containerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, Level.FINE, "Starting optional node program resume command"); containerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (context.node().wantedRestartGeneration().isPresent() && !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); } if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) { currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); } Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) { DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY); 
// Record both the repo's current view and the new value so publishStateToNodeRepoIfChanged
// only publishes (and logs the diff) when something actually changed.
currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion()); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(newImage.tagAsVersion()); } publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private Container startContainer(NodeAgentContext context) { ContainerData containerData = createContainerData(context); ContainerResources wantedResources = warmUpDuration(context).isNegative() ? getContainerResources(context) : getContainerResources(context).withUnlimitedCpus(); containerOperations.createContainer(context, containerData, wantedResources); containerOperations.startContainer(context); currentRebootGeneration = context.node().wantedRebootGeneration(); currentRestartGeneration = context.node().wantedRestartGeneration(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just started")); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { List<String> reasons = shouldRemoveContainer(context, existingContainer.get()); if (!reasons.isEmpty()) { removeContainer(context, existingContainer.get(), reasons, false); return Optional.empty(); } shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> { 
context.log(logger, "Will restart services: " + restartReason); orchestratorSuspendNode(context); containerOperations.restartVespa(context); currentRestartGeneration = context.node().wantedRestartGeneration(); }); } return existingContainer; } private Optional<String> shouldRestartServices( NodeAgentContext context, Container existingContainer) { NodeSpec node = context.node(); if (!existingContainer.state.isRunning() || node.state() != NodeState.active) return Optional.empty(); if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get()); } return Optional.empty(); } private void stopServicesIfNeeded(NodeAgentContext context) { if (hasStartedServices && context.node().owner().isEmpty()) stopServices(context); } private void stopServices(NodeAgentContext context) { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; firstSuccessfulHealthCheckInstant = Optional.empty(); containerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void stopForHostSuspension(NodeAgentContext context) { getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true)); } public void suspend(NodeAgentContext context) { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; containerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, Level.WARNING, "Failed trying to suspend container", e); } } private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().state(); 
// Collect every independent reason the existing container must be removed and recreated;
// any non-empty result makes removeContainerIfNeededUpdateContainerState remove it.
List<String> reasons = new ArrayList<>(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) reasons.add("Node in state " + nodeState + ", container should no longer be running"); if (context.node().wantedDockerImage().isPresent() && !context.node().wantedDockerImage().get().equals(existingContainer.image)) { reasons.add("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) reasons.add("Container no longer running"); if (currentRebootGeneration < context.node().wantedRebootGeneration()) { reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d", currentRebootGeneration, context.node().wantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources)) { reasons.add("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory()); } if (containerState == STARTING) reasons.add("Container failed to start"); return reasons; } private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) { context.log(logger, "Will remove container: " + String.join(", ", reasons)); if (existingContainer.state.isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().state() != NodeState.dirty) { suspend(context); } stopServices(context); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer)); containerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new 
containerState is " + containerState); } private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant .map(clock.instant().minus(warmUpDuration(context))::isBefore) .orElse(true)) return existingContainer; if (wantedContainerResources.equalsCpu(existingContainer.resources)) return existingContainer; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu()); containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources.memoryBytes())); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just updated")); } private ContainerResources getContainerResources(NodeAgentContext context) { double cpuCap = noCpuCap(context.zone()) ? 
0 : context.node().owner() .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())) .orElse(containerCpuCap) .with(FetchVector.Dimension.HOSTNAME, context.node().hostname()) .value() * context.vcpuOnThisHost(); return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev || zone.getSystemName().isCd(); } private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) { NodeSpec node = context.node(); if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false; RegistryCredentials credentials = registryCredentialsProvider.get(); return node.wantedDockerImage() .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials)) .orElse(false); } public void converge(NodeAgentContext context) { try { doConverge(context); context.log(logger, Level.INFO, "Converged"); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, Level.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Caught a DockerException", e); } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e); } } void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() || currentRestartGeneration.map(current -> current < 
node.currentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.currentRestartGeneration(); lastNode = node; } switch (node.state()) { case ready: case reserved: case failed: case inactive: case parked: removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context); stopServicesIfNeeded(context); break; case active: storageMaintainer.syncLogs(context, true); storageMaintainer.cleanDiskIfFull(context); storageMaintainer.handleCoreDumpsForContainer(context, container); if (downloadImageIfNeeded(context, container)) { context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainers.forEach(maintainer -> maintainer.converge(context)); if (container.isEmpty()) { containerState = STARTING; container = Optional.of(startContainer(context)); containerState = UNKNOWN; } else { container = Optional.of(updateContainerIfNeeded(context, container.get())); } aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); startServicesIfNeeded(context); resumeNodeIfNeeded(context); if (healthChecker.isPresent()) { healthChecker.get().verifyHealth(context); if (firstSuccessfulHealthCheckInstant.isEmpty()) firstSuccessfulHealthCheckInstant = Optional.of(clock.instant()); Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context))); if (!container.get().resources.equalsCpu(getContainerResources(context))) throw new ConvergenceException("Refusing to resume until warm up period ends (" + (timeLeft.isNegative() ? 
"next tick" : "in " + timeLeft) + ")"); } updateNodeRepoWithCurrentAttributes(context); if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) { context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); suspendedInOrchestrator = false; } break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready"); credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.syncLogs(context, false); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw new ConvergenceException("UNKNOWN STATE " + node.state().name()); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state); if (builder.length() > 0) { context.log(logger, Level.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? 
null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = containerOperations.getContainer(context); if (container.isEmpty()) containerState = ABSENT; return container; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().state() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); suspendedInOrchestrator = true; } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected ContainerData createContainerData(NodeAgentContext context) { return new ContainerData() { @Override public void addFile(Path pathInContainer, String data) { throw new UnsupportedOperationException("addFile not implemented"); } @Override public void addDirectory(Path pathInContainer) { throw new UnsupportedOperationException("addDirectory not implemented"); } @Override public void createSymlink(Path symlink, Path target) { throw new UnsupportedOperationException("createSymlink not implemented"); } }; } protected List<CredentialsMaintainer> credentialsMaintainers() { return credentialsMaintainers; } }
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final ContainerOperations containerOperations; private final RegistryCredentialsProvider registryCredentialsProvider; private final StorageMaintainer storageMaintainer; private final List<CredentialsMaintainer> credentialsMaintainers; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final Clock clock; private final Duration warmUpDuration; private final DoubleFlag containerCpuCap; private Thread loopThread; private ContainerState containerState = UNKNOWN; private NodeSpec lastNode; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty(); private boolean suspendedInOrchestrator = false; private int numberOfUnhandledException = 0; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock) { this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider, storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock, DEFAULT_WARM_UP_DURATION); } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.containerOperations = containerOperations; this.registryCredentialsProvider = registryCredentialsProvider; this.storageMaintainer = storageMaintainer; this.credentialsMaintainers = credentialsMaintainers; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.clock = clock; this.warmUpDuration = warmUpDuration; this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource); } @Override public void start(NodeAgentContext initialContext) { if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent."); loopThread = new Thread(() -> { while (!terminated.get()) { try { NodeAgentContext context = contextSupplier.nextContext(); converge(context); } catch 
(InterruptedException ignored) { } } }); loopThread.setName("tick-" + initialContext.hostname()); loopThread.start(); } @Override public void stopForRemoval(NodeAgentContext context) { if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent."); contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); context.log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Starting services"); containerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, Level.FINE, "Starting optional node program resume command"); containerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (context.node().wantedRestartGeneration().isPresent() && !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); } if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) { currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); } Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) { DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY); 
currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion()); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(newImage.tagAsVersion()); } publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private Container startContainer(NodeAgentContext context) { ContainerData containerData = createContainerData(context); ContainerResources wantedResources = warmUpDuration(context).isNegative() ? getContainerResources(context) : getContainerResources(context).withUnlimitedCpus(); containerOperations.createContainer(context, containerData, wantedResources); containerOperations.startContainer(context); currentRebootGeneration = context.node().wantedRebootGeneration(); currentRestartGeneration = context.node().wantedRestartGeneration(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just started")); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { List<String> reasons = shouldRemoveContainer(context, existingContainer.get()); if (!reasons.isEmpty()) { removeContainer(context, existingContainer.get(), reasons, false); return Optional.empty(); } shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> { 
context.log(logger, "Will restart services: " + restartReason); orchestratorSuspendNode(context); containerOperations.restartVespa(context); currentRestartGeneration = context.node().wantedRestartGeneration(); }); } return existingContainer; } private Optional<String> shouldRestartServices( NodeAgentContext context, Container existingContainer) { NodeSpec node = context.node(); if (!existingContainer.state.isRunning() || node.state() != NodeState.active) return Optional.empty(); if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get()); } return Optional.empty(); } private void stopServicesIfNeeded(NodeAgentContext context) { if (hasStartedServices && context.node().owner().isEmpty()) stopServices(context); } private void stopServices(NodeAgentContext context) { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; firstSuccessfulHealthCheckInstant = Optional.empty(); containerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void stopForHostSuspension(NodeAgentContext context) { getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true)); } public void suspend(NodeAgentContext context) { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; containerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, Level.WARNING, "Failed trying to suspend container", e); } } private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().state(); 
List<String> reasons = new ArrayList<>(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) reasons.add("Node in state " + nodeState + ", container should no longer be running"); if (context.node().wantedDockerImage().isPresent() && !context.node().wantedDockerImage().get().equals(existingContainer.image)) { reasons.add("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) reasons.add("Container no longer running"); if (currentRebootGeneration < context.node().wantedRebootGeneration()) { reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d", currentRebootGeneration, context.node().wantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources)) { reasons.add("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory()); } if (containerState == STARTING) reasons.add("Container failed to start"); return reasons; } private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) { context.log(logger, "Will remove container: " + String.join(", ", reasons)); if (existingContainer.state.isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().state() != NodeState.dirty) { suspend(context); } stopServices(context); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer)); containerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new 
containerState is " + containerState); } private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant .map(clock.instant().minus(warmUpDuration(context))::isBefore) .orElse(true)) return existingContainer; if (wantedContainerResources.equalsCpu(existingContainer.resources)) return existingContainer; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu()); containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources.memoryBytes())); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just updated")); } private ContainerResources getContainerResources(NodeAgentContext context) { double cpuCap = noCpuCap(context.zone()) ? 
0 : context.node().owner() .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())) .orElse(containerCpuCap) .with(FetchVector.Dimension.HOSTNAME, context.node().hostname()) .value() * context.vcpuOnThisHost(); return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev || zone.getSystemName().isCd(); } private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) { NodeSpec node = context.node(); if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false; RegistryCredentials credentials = registryCredentialsProvider.get(); return node.wantedDockerImage() .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials)) .orElse(false); } public void converge(NodeAgentContext context) { try { doConverge(context); context.log(logger, Level.INFO, "Converged"); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, Level.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Caught a DockerException", e); } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e); } } void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() || currentRestartGeneration.map(current -> current < 
node.currentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.currentRestartGeneration(); lastNode = node; } switch (node.state()) { case ready: case reserved: case failed: case inactive: case parked: removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context); stopServicesIfNeeded(context); break; case active: storageMaintainer.syncLogs(context, true); storageMaintainer.cleanDiskIfFull(context); storageMaintainer.handleCoreDumpsForContainer(context, container); if (downloadImageIfNeeded(context, container)) { context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainers.forEach(maintainer -> maintainer.converge(context)); if (container.isEmpty()) { containerState = STARTING; container = Optional.of(startContainer(context)); containerState = UNKNOWN; } else { container = Optional.of(updateContainerIfNeeded(context, container.get())); } aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); startServicesIfNeeded(context); resumeNodeIfNeeded(context); if (healthChecker.isPresent()) { healthChecker.get().verifyHealth(context); if (firstSuccessfulHealthCheckInstant.isEmpty()) firstSuccessfulHealthCheckInstant = Optional.of(clock.instant()); Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context))); if (!container.get().resources.equalsCpu(getContainerResources(context))) throw new ConvergenceException("Refusing to resume until warm up period ends (" + (timeLeft.isNegative() ? 
"next tick" : "in " + timeLeft) + ")"); } updateNodeRepoWithCurrentAttributes(context); if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) { context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); suspendedInOrchestrator = false; } break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready"); credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.syncLogs(context, false); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw new ConvergenceException("UNKNOWN STATE " + node.state().name()); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state); if (builder.length() > 0) { context.log(logger, Level.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? 
null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = containerOperations.getContainer(context); if (container.isEmpty()) containerState = ABSENT; return container; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().state() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); suspendedInOrchestrator = true; } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected ContainerData createContainerData(NodeAgentContext context) { return new ContainerData() { @Override public void addFile(Path pathInContainer, String data) { throw new UnsupportedOperationException("addFile not implemented"); } @Override public void addDirectory(Path pathInContainer) { throw new UnsupportedOperationException("addDirectory not implemented"); } @Override public void createSymlink(Path symlink, Path target) { throw new UnsupportedOperationException("createSymlink not implemented"); } }; } protected List<CredentialsMaintainer> credentialsMaintainers() { return credentialsMaintainers; } }
```suggestion toSlime(stats.activeLoad(), zoneObject.setObject("activeLoad")); ```
private static Slime toSlime(Controller controller) { try { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor zonesArray = root.setArray("zones"); for (ZoneId zone : controller.zoneRegistry().zones().all().ids()) { NodeRepoStats stats = controller.serviceRegistry().configServer().nodeRepository().getStats(zone); if (stats.applicationStats().isEmpty()) continue; Cursor zoneObject = zonesArray.addObject(); zoneObject.setString("id", zone.toString()); toSlime(stats.load(), zoneObject.setObject("load")); toSlime(stats.load(), zoneObject.setObject("activeLoad")); Cursor applicationsArray = zoneObject.setArray("applications"); for (var applicationStats : stats.applicationStats()) toSlime(applicationStats, applicationsArray.addObject()); } return slime; } catch (Exception e) { e.printStackTrace(); throw e; } }
toSlime(stats.load(), zoneObject.setObject("activeLoad"));
private static Slime toSlime(Controller controller) { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor zonesArray = root.setArray("zones"); for (ZoneId zone : controller.zoneRegistry().zones().reachable().ids()) { NodeRepoStats stats = controller.serviceRegistry().configServer().nodeRepository().getStats(zone); if (stats.applicationStats().isEmpty()) continue; Cursor zoneObject = zonesArray.addObject(); zoneObject.setString("id", zone.toString()); toSlime(stats.load(), zoneObject.setObject("load")); toSlime(stats.activeLoad(), zoneObject.setObject("activeLoad")); Cursor applicationsArray = zoneObject.setArray("applications"); for (var applicationStats : stats.applicationStats()) toSlime(applicationStats, applicationsArray.addObject()); } return slime; }
class StatsResponse extends SlimeJsonResponse { public StatsResponse(Controller controller) { super(toSlime(controller)); } private static void toSlime(ApplicationStats stats, Cursor applicationObject) { applicationObject.setString("id", stats.id().toFullString()); toSlime(stats.load(), applicationObject.setObject("load")); applicationObject.setDouble("cost", stats.cost()); applicationObject.setDouble("unutilizedCost", stats.unutilizedCost()); } private static void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } }
class StatsResponse extends SlimeJsonResponse { public StatsResponse(Controller controller) { super(toSlime(controller)); } private static void toSlime(ApplicationStats stats, Cursor applicationObject) { applicationObject.setString("id", stats.id().toFullString()); toSlime(stats.load(), applicationObject.setObject("load")); applicationObject.setDouble("cost", stats.cost()); applicationObject.setDouble("unutilizedCost", stats.unutilizedCost()); } private static void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } }
try/catch just for testing? Remove?
private static Slime toSlime(Controller controller) { try { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor zonesArray = root.setArray("zones"); for (ZoneId zone : controller.zoneRegistry().zones().all().ids()) { NodeRepoStats stats = controller.serviceRegistry().configServer().nodeRepository().getStats(zone); if (stats.applicationStats().isEmpty()) continue; Cursor zoneObject = zonesArray.addObject(); zoneObject.setString("id", zone.toString()); toSlime(stats.load(), zoneObject.setObject("load")); toSlime(stats.load(), zoneObject.setObject("activeLoad")); Cursor applicationsArray = zoneObject.setArray("applications"); for (var applicationStats : stats.applicationStats()) toSlime(applicationStats, applicationsArray.addObject()); } return slime; } catch (Exception e) { e.printStackTrace(); throw e; } }
e.printStackTrace();
private static Slime toSlime(Controller controller) { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor zonesArray = root.setArray("zones"); for (ZoneId zone : controller.zoneRegistry().zones().reachable().ids()) { NodeRepoStats stats = controller.serviceRegistry().configServer().nodeRepository().getStats(zone); if (stats.applicationStats().isEmpty()) continue; Cursor zoneObject = zonesArray.addObject(); zoneObject.setString("id", zone.toString()); toSlime(stats.load(), zoneObject.setObject("load")); toSlime(stats.activeLoad(), zoneObject.setObject("activeLoad")); Cursor applicationsArray = zoneObject.setArray("applications"); for (var applicationStats : stats.applicationStats()) toSlime(applicationStats, applicationsArray.addObject()); } return slime; }
class StatsResponse extends SlimeJsonResponse { public StatsResponse(Controller controller) { super(toSlime(controller)); } private static void toSlime(ApplicationStats stats, Cursor applicationObject) { applicationObject.setString("id", stats.id().toFullString()); toSlime(stats.load(), applicationObject.setObject("load")); applicationObject.setDouble("cost", stats.cost()); applicationObject.setDouble("unutilizedCost", stats.unutilizedCost()); } private static void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } }
class StatsResponse extends SlimeJsonResponse { public StatsResponse(Controller controller) { super(toSlime(controller)); } private static void toSlime(ApplicationStats stats, Cursor applicationObject) { applicationObject.setString("id", stats.id().toFullString()); toSlime(stats.load(), applicationObject.setObject("load")); applicationObject.setDouble("cost", stats.cost()); applicationObject.setDouble("unutilizedCost", stats.unutilizedCost()); } private static void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } }
```suggestion toSlime(stats.activeLoad(), root.setObject("activeLoad")); ```
private HttpResponse stats() { var stats = nodeRepository.computeStats(); Slime slime = new Slime(); Cursor root = slime.setObject(); toSlime(stats.load(), root.setObject("load")); toSlime(stats.load(), root.setObject("activeLoad")); Cursor applicationsObject = root.setArray("applications"); for (int i = 0; i <= 5; i++) { if (i >= stats.applicationStats().size()) break; var applicationStats = stats.applicationStats().get(i); Cursor applicationObject = applicationsObject.addObject(); applicationObject.setString("id", applicationStats.id().toFullString()); toSlime(applicationStats.load(), applicationObject.setObject("load")); applicationObject.setDouble("cost", applicationStats.cost()); applicationObject.setDouble("unutilizedCost", applicationStats.unutilizedCost()); } return new SlimeJsonResponse(slime); }
toSlime(stats.load(), root.setObject("activeLoad"));
private HttpResponse stats() { var stats = nodeRepository.computeStats(); Slime slime = new Slime(); Cursor root = slime.setObject(); toSlime(stats.load(), root.setObject("load")); toSlime(stats.activeLoad(), root.setObject("activeLoad")); Cursor applicationsArray = root.setArray("applications"); for (int i = 0; i <= 5; i++) { if (i >= stats.applicationStats().size()) break; var applicationStats = stats.applicationStats().get(i); Cursor applicationObject = applicationsArray.addObject(); applicationObject.setString("id", applicationStats.id().toFullString()); toSlime(applicationStats.load(), applicationObject.setObject("load")); applicationObject.setDouble("cost", applicationStats.cost()); applicationObject.setDouble("unutilizedCost", applicationStats.unutilizedCost()); } return new SlimeJsonResponse(slime); }
class NodesV2ApiHandler extends LoggingRequestHandler { private final Orchestrator orchestrator; private final NodeRepository nodeRepository; private final MetricsDb metricsDb; private final NodeFlavors nodeFlavors; @Inject public NodesV2ApiHandler(LoggingRequestHandler.Context parentCtx, Orchestrator orchestrator, NodeRepository nodeRepository, MetricsDb metricsDb, NodeFlavors flavors) { super(parentCtx); this.orchestrator = orchestrator; this.nodeRepository = nodeRepository; this.metricsDb = metricsDb; this.nodeFlavors = flavors; } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return isPatchOverride(request) ? handlePATCH(request) : handlePOST(request); case DELETE: return handleDELETE(request); case PATCH: return handlePATCH(request); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (NotFoundException | NoSuchNodeException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri()); String pathS = request.getUri().getPath(); if (path.matches( "/nodes/v2")) return new ResourceResponse(request.getUri(), "node", "state", "acl", "command", "archive", "locks", "maintenance", "upgrade", "capacity", "application", "stats"); if (path.matches( "/nodes/v2/node")) return new NodesResponse(ResponseType.nodeList, request, orchestrator, nodeRepository); if (pathS.startsWith("/nodes/v2/node/")) return new NodesResponse(ResponseType.singleNode, request, orchestrator, nodeRepository); if 
(path.matches( "/nodes/v2/state")) return new NodesResponse(ResponseType.stateList, request, orchestrator, nodeRepository); if (pathS.startsWith("/nodes/v2/state/")) return new NodesResponse(ResponseType.nodesInStateList, request, orchestrator, nodeRepository); if (path.matches( "/nodes/v2/acl/{hostname}")) return new NodeAclResponse(request, nodeRepository, path.get("hostname")); if (path.matches( "/nodes/v2/command")) return new ResourceResponse(request.getUri(), "restart", "reboot"); if (path.matches( "/nodes/v2/archive")) return new ArchiveResponse(nodeRepository); if (path.matches( "/nodes/v2/locks")) return new LocksResponse(); if (path.matches( "/nodes/v2/maintenance")) return new JobsResponse(nodeRepository.jobControl()); if (path.matches( "/nodes/v2/upgrade")) return new UpgradeResponse(nodeRepository.infrastructureVersions(), nodeRepository.osVersions(), nodeRepository.containerImages()); if (path.matches( "/nodes/v2/capacity")) return new HostCapacityResponse(nodeRepository, request); if (path.matches( "/nodes/v2/application")) return applicationList(request.getUri()); if (path.matches( "/nodes/v2/application/{applicationId}")) return application(path.get("applicationId"), request.getUri()); if (path.matches( "/nodes/v2/stats")) return stats(); throw new NotFoundException("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/state/ready/{hostname}")) { nodeRepository.nodes().markNodeAvailableForNewAllocation(path.get("hostname"), Agent.operator, "Readied through the nodes/v2 API"); return new MessageResponse("Moved " + path.get("hostname") + " to ready"); } else if (path.matches("/nodes/v2/state/failed/{hostname}")) { List<Node> failedNodes = nodeRepository.nodes().failRecursively(path.get("hostname"), Agent.operator, "Failed through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(failedNodes) + " to failed"); } else if 
(path.matches("/nodes/v2/state/parked/{hostname}")) { List<Node> parkedNodes = nodeRepository.nodes().parkRecursively(path.get("hostname"), Agent.operator, "Parked through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(parkedNodes) + " to parked"); } else if (path.matches("/nodes/v2/state/dirty/{hostname}")) { List<Node> dirtiedNodes = nodeRepository.nodes().deallocateRecursively(path.get("hostname"), Agent.operator, "Dirtied through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(dirtiedNodes) + " to dirty"); } else if (path.matches("/nodes/v2/state/active/{hostname}")) { nodeRepository.nodes().reactivate(path.get("hostname"), Agent.operator, "Reactivated through nodes/v2 API"); return new MessageResponse("Moved " + path.get("hostname") + " to active"); } else if (path.matches("/nodes/v2/state/breakfixed/{hostname}")) { List<Node> breakfixedNodes = nodeRepository.nodes().breakfixRecursively(path.get("hostname"), Agent.operator, "Breakfixed through the nodes/v2 API"); return new MessageResponse("Breakfixed " + hostnamesAsString(breakfixedNodes)); } throw new NotFoundException("Cannot put to path '" + path + "'"); } private HttpResponse handlePATCH(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/node/{hostname}")) { try (NodePatcher patcher = new NodePatcher(nodeFlavors, request.getData(), nodeFromHostname(path.get("hostname")), nodeRepository)) { var patchedNodes = patcher.apply(); nodeRepository.nodes().write(patchedNodes, patcher.nodeMutexOfHost()); return new MessageResponse("Updated " + patcher.nodeMutexOfHost().node().hostname()); } } else if (path.matches("/nodes/v2/application/{applicationId}")) { try (ApplicationPatcher patcher = new ApplicationPatcher(request.getData(), ApplicationId.fromFullString(path.get("applicationId")), nodeRepository)) { nodeRepository.applications().put(patcher.apply(), patcher.lock()); return new MessageResponse("Updated " + 
patcher.application()); } } else if (path.matches("/nodes/v2/archive/{tenant}")) { String uri = requiredField(toSlime(request), "uri", Inspector::asString); return setTenantArchiveUri(path.get("tenant"), Optional.of(uri)); } else if (path.matches("/nodes/v2/upgrade/{nodeType}")) { return setTargetVersions(path.get("nodeType"), toSlime(request)); } throw new NotFoundException("Nothing at '" + path + "'"); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/command/restart")) { int restartCount = nodeRepository.nodes().restart(toNodeFilter(request)).size(); return new MessageResponse("Scheduled restart of " + restartCount + " matching nodes"); } if (path.matches("/nodes/v2/command/reboot")) { int rebootCount = nodeRepository.nodes().reboot(toNodeFilter(request)).size(); return new MessageResponse("Scheduled reboot of " + rebootCount + " matching nodes"); } if (path.matches("/nodes/v2/node")) { int addedNodes = addNodes(toSlime(request)); return new MessageResponse("Added " + addedNodes + " nodes to the provisioned state"); } if (path.matches("/nodes/v2/maintenance/run/{job}")) return runJob(path.get("job")); if (path.matches("/nodes/v2/upgrade/firmware")) return requestFirmwareCheckResponse(); throw new NotFoundException("Nothing at path '" + request.getUri().getPath() + "'"); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/node/{hostname}")) return deleteNode(path.get("hostname")); if (path.matches("/nodes/v2/archive/{tenant}")) return setTenantArchiveUri(path.get("tenant"), Optional.empty()); if (path.matches("/nodes/v2/upgrade/firmware")) return cancelFirmwareCheckResponse(); throw new NotFoundException("Nothing at path '" + request.getUri().getPath() + "'"); } private HttpResponse runJob(String job) { nodeRepository.jobControl().run(job); return new MessageResponse("Executed job '" + job + "'"); } private 
HttpResponse deleteNode(String hostname) { Optional<NodeMutex> nodeMutex = nodeRepository.nodes().lockAndGet(hostname); if (nodeMutex.isEmpty()) throw new NotFoundException("No node with hostname '" + hostname + "'"); try (var lock = nodeMutex.get()) { if (lock.node().state() == Node.State.deprovisioned) { nodeRepository.nodes().forget(lock.node()); return new MessageResponse("Permanently removed " + hostname); } else { List<Node> removedNodes = nodeRepository.nodes().removeRecursively(hostname); return new MessageResponse("Removed " + removedNodes.stream().map(Node::hostname).collect(Collectors.joining(", "))); } } } private Node nodeFromHostname(String hostname) { return nodeRepository.nodes().node(hostname).orElseThrow(() -> new NotFoundException("No node found with hostname " + hostname)); } public int addNodes(Inspector inspector) { List<Node> nodes = createNodesFromSlime(inspector); return nodeRepository.nodes().addNodes(nodes, Agent.operator).size(); } private Inspector toSlime(HttpRequest request) { try { byte[] jsonBytes = IOUtils.readBytes(request.getData(), 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes).get(); } catch (IOException e) { throw new UncheckedIOException(e); } } private List<Node> createNodesFromSlime(Inspector object) { List<Node> nodes = new ArrayList<>(); object.traverse((ArrayTraverser) (int i, Inspector item) -> nodes.add(createNode(item))); return nodes; } private Node createNode(Inspector inspector) { Set<String> ipAddresses = new HashSet<>(); inspector.field("ipAddresses").traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); Set<String> ipAddressPool = new HashSet<>(); inspector.field("additionalIpAddresses").traverse((ArrayTraverser) (i, item) -> ipAddressPool.add(item.asString())); List<Address> addressPool = new ArrayList<>(); inspector.field("additionalHostnames").traverse((ArrayTraverser) (i, item) -> addressPool.add(new Address(item.asString()))); Node.Builder builder = 
Node.create(inspector.field("openStackId").asString(), IP.Config.of(ipAddresses, ipAddressPool, addressPool), inspector.field("hostname").asString(), flavorFromSlime(inspector), nodeTypeFromSlime(inspector.field("type"))); optionalString(inspector.field("parentHostname")).ifPresent(builder::parentHostname); optionalString(inspector.field("modelName")).ifPresent(builder::modelName); optionalString(inspector.field("reservedTo")).map(TenantName::from).ifPresent(builder::reservedTo); optionalString(inspector.field("exclusiveTo")).map(ApplicationId::fromSerializedForm).ifPresent(builder::exclusiveTo); optionalString(inspector.field("switchHostname")).ifPresent(builder::switchHostname); return builder.build(); } private Flavor flavorFromSlime(Inspector inspector) { Inspector flavorInspector = inspector.field("flavor"); Inspector resourcesInspector = inspector.field("resources"); if ( ! flavorInspector.valid()) { return new Flavor(new NodeResources( requiredField(resourcesInspector, "vcpu", Inspector::asDouble), requiredField(resourcesInspector, "memoryGb", Inspector::asDouble), requiredField(resourcesInspector, "diskGb", Inspector::asDouble), requiredField(resourcesInspector, "bandwidthGbps", Inspector::asDouble), optionalString(resourcesInspector.field("diskSpeed")).map(NodeResourcesSerializer::diskSpeedFrom).orElse(NodeResources.DiskSpeed.getDefault()), optionalString(resourcesInspector.field("storageType")).map(NodeResourcesSerializer::storageTypeFrom).orElse(NodeResources.StorageType.getDefault()))); } Flavor flavor = nodeFlavors.getFlavorOrThrow(flavorInspector.asString()); if (resourcesInspector.valid()) { if (resourcesInspector.field("vcpu").valid()) flavor = flavor.with(flavor.resources().withVcpu(resourcesInspector.field("vcpu").asDouble())); if (resourcesInspector.field("memoryGb").valid()) flavor = flavor.with(flavor.resources().withMemoryGb(resourcesInspector.field("memoryGb").asDouble())); if (resourcesInspector.field("diskGb").valid()) flavor = 
flavor.with(flavor.resources().withDiskGb(resourcesInspector.field("diskGb").asDouble())); if (resourcesInspector.field("bandwidthGbps").valid()) flavor = flavor.with(flavor.resources().withBandwidthGbps(resourcesInspector.field("bandwidthGbps").asDouble())); if (resourcesInspector.field("diskSpeed").valid()) flavor = flavor.with(flavor.resources().with(NodeResourcesSerializer.diskSpeedFrom(resourcesInspector.field("diskSpeed").asString()))); if (resourcesInspector.field("storageType").valid()) flavor = flavor.with(flavor.resources().with(NodeResourcesSerializer.storageTypeFrom(resourcesInspector.field("storageType").asString()))); } return flavor; } private static <T> T requiredField(Inspector inspector, String fieldName, Function<Inspector, T> valueExtractor) { Inspector field = inspector.field(fieldName); if (!field.valid()) throw new IllegalArgumentException("Required field '" + fieldName + "' is missing"); return valueExtractor.apply(field); } private NodeType nodeTypeFromSlime(Inspector object) { if (! 
object.valid()) return NodeType.tenant; return NodeSerializer.typeFrom(object.asString()); } public static NodeFilter toNodeFilter(HttpRequest request) { NodeFilter filter = NodeHostFilter.from(HostFilter.from(request.getProperty("hostname"), request.getProperty("flavor"), request.getProperty("clusterType"), request.getProperty("clusterId"))); filter = ApplicationFilter.from(request.getProperty("application"), filter); filter = StateFilter.from(request.getProperty("state"), request.getBooleanProperty("includeDeprovisioned"), filter); filter = NodeTypeFilter.from(request.getProperty("type"), filter); filter = ParentHostFilter.from(request.getProperty("parentHost"), filter); filter = NodeOsVersionFilter.from(request.getProperty("osVersion"), filter); return filter; } private static boolean isPatchOverride(HttpRequest request) { String override = request.getHeader("X-HTTP-Method-Override"); if (override != null) { if (override.equals("PATCH")) { return true; } else { String msg = String.format("Illegal X-HTTP-Method-Override header for POST request. 
Accepts 'PATCH' but got '%s'", override); throw new IllegalArgumentException(msg); } } return false; } private MessageResponse setTargetVersions(String nodeTypeS, Inspector inspector) { NodeType nodeType = NodeType.valueOf(nodeTypeS.toLowerCase()); List<String> messageParts = new ArrayList<>(4); boolean force = inspector.field("force").asBool(); Inspector versionField = inspector.field("version"); Inspector osVersionField = inspector.field("osVersion"); Inspector containerImageField = inspector.field("dockerImage"); Inspector upgradeBudgetField = inspector.field("upgradeBudget"); if (versionField.valid()) { Version version = Version.fromString(versionField.asString()); nodeRepository.infrastructureVersions().setTargetVersion(nodeType, version, force); messageParts.add("version to " + version.toFullString()); } if (osVersionField.valid()) { String v = osVersionField.asString(); if (v.isEmpty()) { nodeRepository.osVersions().removeTarget(nodeType); messageParts.add("osVersion to null"); } else { Version osVersion = Version.fromString(v); Optional<Duration> upgradeBudget = Optional.of(upgradeBudgetField) .filter(Inspector::valid) .map(Inspector::asString) .map(s -> { try { return Duration.parse(s); } catch (Exception e) { throw new IllegalArgumentException("Invalid duration '" + s + "'", e); } }); nodeRepository.osVersions().setTarget(nodeType, osVersion, upgradeBudget, force); messageParts.add("osVersion to " + osVersion.toFullString()); upgradeBudget.ifPresent(d -> messageParts.add("upgradeBudget to " + d)); } } if (containerImageField.valid()) { Optional<DockerImage> dockerImage = Optional.of(containerImageField.asString()) .filter(s -> !s.isEmpty()) .map(DockerImage::fromString); nodeRepository.containerImages().setImage(nodeType, dockerImage); messageParts.add("container image to " + dockerImage.map(DockerImage::asString).orElse(null)); } if (messageParts.isEmpty()) { throw new IllegalArgumentException("At least one of 'version', 'osVersion' or 'dockerImage' must 
be set"); } return new MessageResponse("Set " + String.join(", ", messageParts) + " for nodes of type " + nodeType); } private MessageResponse cancelFirmwareCheckResponse() { nodeRepository.firmwareChecks().cancel(); return new MessageResponse("Cancelled outstanding requests for firmware checks"); } private MessageResponse requestFirmwareCheckResponse() { nodeRepository.firmwareChecks().request(); return new MessageResponse("Will request firmware checks on all hosts."); } private HttpResponse setTenantArchiveUri(String tenant, Optional<String> archiveUri) { nodeRepository.archiveUris().setArchiveUri(TenantName.from(tenant), archiveUri); return new MessageResponse(archiveUri.map(a -> "Updated").orElse("Removed") + " archive URI for " + tenant); } private static String hostnamesAsString(List<Node> nodes) { return nodes.stream().map(Node::hostname).sorted().collect(Collectors.joining(", ")); } private HttpResponse applicationList(URI uri) { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor applications = root.setArray("applications"); for (ApplicationId id : nodeRepository.applications().ids()) { Cursor application = applications.addObject(); application.setString("url", withPath("/nodes/v2/application/" + id.toFullString(), uri).toString()); application.setString("id", id.toFullString()); } return new SlimeJsonResponse(slime); } private HttpResponse application(String idString, URI uri) { ApplicationId id = ApplicationId.fromFullString(idString); Optional<Application> application = nodeRepository.applications().get(id); if (application.isEmpty()) return ErrorResponse.notFoundError("No application '" + id + "'"); Slime slime = ApplicationSerializer.toSlime(application.get(), nodeRepository.nodes().list(Node.State.active).owner(id), metricsDb, nodeRepository, withPath("/nodes/v2/applications/" + id, uri)); return new SlimeJsonResponse(slime); } private void toSlime(Load load, Cursor object) { object.setDouble("cpu", load.cpu()); 
object.setDouble("memory", load.memory()); object.setDouble("disk", load.disk()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } }
class NodesV2ApiHandler extends LoggingRequestHandler { private final Orchestrator orchestrator; private final NodeRepository nodeRepository; private final MetricsDb metricsDb; private final NodeFlavors nodeFlavors; @Inject public NodesV2ApiHandler(LoggingRequestHandler.Context parentCtx, Orchestrator orchestrator, NodeRepository nodeRepository, MetricsDb metricsDb, NodeFlavors flavors) { super(parentCtx); this.orchestrator = orchestrator; this.nodeRepository = nodeRepository; this.metricsDb = metricsDb; this.nodeFlavors = flavors; } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return isPatchOverride(request) ? handlePATCH(request) : handlePOST(request); case DELETE: return handleDELETE(request); case PATCH: return handlePATCH(request); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (NotFoundException | NoSuchNodeException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri()); String pathS = request.getUri().getPath(); if (path.matches( "/nodes/v2")) return new ResourceResponse(request.getUri(), "node", "state", "acl", "command", "archive", "locks", "maintenance", "upgrade", "capacity", "application", "stats"); if (path.matches( "/nodes/v2/node")) return new NodesResponse(ResponseType.nodeList, request, orchestrator, nodeRepository); if (pathS.startsWith("/nodes/v2/node/")) return new NodesResponse(ResponseType.singleNode, request, orchestrator, nodeRepository); if 
(path.matches( "/nodes/v2/state")) return new NodesResponse(ResponseType.stateList, request, orchestrator, nodeRepository); if (pathS.startsWith("/nodes/v2/state/")) return new NodesResponse(ResponseType.nodesInStateList, request, orchestrator, nodeRepository); if (path.matches( "/nodes/v2/acl/{hostname}")) return new NodeAclResponse(request, nodeRepository, path.get("hostname")); if (path.matches( "/nodes/v2/command")) return new ResourceResponse(request.getUri(), "restart", "reboot"); if (path.matches( "/nodes/v2/archive")) return new ArchiveResponse(nodeRepository); if (path.matches( "/nodes/v2/locks")) return new LocksResponse(); if (path.matches( "/nodes/v2/maintenance")) return new JobsResponse(nodeRepository.jobControl()); if (path.matches( "/nodes/v2/upgrade")) return new UpgradeResponse(nodeRepository.infrastructureVersions(), nodeRepository.osVersions(), nodeRepository.containerImages()); if (path.matches( "/nodes/v2/capacity")) return new HostCapacityResponse(nodeRepository, request); if (path.matches( "/nodes/v2/application")) return applicationList(request.getUri()); if (path.matches( "/nodes/v2/application/{applicationId}")) return application(path.get("applicationId"), request.getUri()); if (path.matches( "/nodes/v2/stats")) return stats(); throw new NotFoundException("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/state/ready/{hostname}")) { nodeRepository.nodes().markNodeAvailableForNewAllocation(path.get("hostname"), Agent.operator, "Readied through the nodes/v2 API"); return new MessageResponse("Moved " + path.get("hostname") + " to ready"); } else if (path.matches("/nodes/v2/state/failed/{hostname}")) { List<Node> failedNodes = nodeRepository.nodes().failRecursively(path.get("hostname"), Agent.operator, "Failed through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(failedNodes) + " to failed"); } else if 
(path.matches("/nodes/v2/state/parked/{hostname}")) { List<Node> parkedNodes = nodeRepository.nodes().parkRecursively(path.get("hostname"), Agent.operator, "Parked through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(parkedNodes) + " to parked"); } else if (path.matches("/nodes/v2/state/dirty/{hostname}")) { List<Node> dirtiedNodes = nodeRepository.nodes().deallocateRecursively(path.get("hostname"), Agent.operator, "Dirtied through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(dirtiedNodes) + " to dirty"); } else if (path.matches("/nodes/v2/state/active/{hostname}")) { nodeRepository.nodes().reactivate(path.get("hostname"), Agent.operator, "Reactivated through nodes/v2 API"); return new MessageResponse("Moved " + path.get("hostname") + " to active"); } else if (path.matches("/nodes/v2/state/breakfixed/{hostname}")) { List<Node> breakfixedNodes = nodeRepository.nodes().breakfixRecursively(path.get("hostname"), Agent.operator, "Breakfixed through the nodes/v2 API"); return new MessageResponse("Breakfixed " + hostnamesAsString(breakfixedNodes)); } throw new NotFoundException("Cannot put to path '" + path + "'"); } private HttpResponse handlePATCH(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/node/{hostname}")) { try (NodePatcher patcher = new NodePatcher(nodeFlavors, request.getData(), nodeFromHostname(path.get("hostname")), nodeRepository)) { var patchedNodes = patcher.apply(); nodeRepository.nodes().write(patchedNodes, patcher.nodeMutexOfHost()); return new MessageResponse("Updated " + patcher.nodeMutexOfHost().node().hostname()); } } else if (path.matches("/nodes/v2/application/{applicationId}")) { try (ApplicationPatcher patcher = new ApplicationPatcher(request.getData(), ApplicationId.fromFullString(path.get("applicationId")), nodeRepository)) { nodeRepository.applications().put(patcher.apply(), patcher.lock()); return new MessageResponse("Updated " + 
patcher.application()); } } else if (path.matches("/nodes/v2/archive/{tenant}")) { String uri = requiredField(toSlime(request), "uri", Inspector::asString); return setTenantArchiveUri(path.get("tenant"), Optional.of(uri)); } else if (path.matches("/nodes/v2/upgrade/{nodeType}")) { return setTargetVersions(path.get("nodeType"), toSlime(request)); } throw new NotFoundException("Nothing at '" + path + "'"); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/command/restart")) { int restartCount = nodeRepository.nodes().restart(toNodeFilter(request)).size(); return new MessageResponse("Scheduled restart of " + restartCount + " matching nodes"); } if (path.matches("/nodes/v2/command/reboot")) { int rebootCount = nodeRepository.nodes().reboot(toNodeFilter(request)).size(); return new MessageResponse("Scheduled reboot of " + rebootCount + " matching nodes"); } if (path.matches("/nodes/v2/node")) { int addedNodes = addNodes(toSlime(request)); return new MessageResponse("Added " + addedNodes + " nodes to the provisioned state"); } if (path.matches("/nodes/v2/maintenance/run/{job}")) return runJob(path.get("job")); if (path.matches("/nodes/v2/upgrade/firmware")) return requestFirmwareCheckResponse(); throw new NotFoundException("Nothing at path '" + request.getUri().getPath() + "'"); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/node/{hostname}")) return deleteNode(path.get("hostname")); if (path.matches("/nodes/v2/archive/{tenant}")) return setTenantArchiveUri(path.get("tenant"), Optional.empty()); if (path.matches("/nodes/v2/upgrade/firmware")) return cancelFirmwareCheckResponse(); throw new NotFoundException("Nothing at path '" + request.getUri().getPath() + "'"); } private HttpResponse runJob(String job) { nodeRepository.jobControl().run(job); return new MessageResponse("Executed job '" + job + "'"); } private 
HttpResponse deleteNode(String hostname) { Optional<NodeMutex> nodeMutex = nodeRepository.nodes().lockAndGet(hostname); if (nodeMutex.isEmpty()) throw new NotFoundException("No node with hostname '" + hostname + "'"); try (var lock = nodeMutex.get()) { if (lock.node().state() == Node.State.deprovisioned) { nodeRepository.nodes().forget(lock.node()); return new MessageResponse("Permanently removed " + hostname); } else { List<Node> removedNodes = nodeRepository.nodes().removeRecursively(hostname); return new MessageResponse("Removed " + removedNodes.stream().map(Node::hostname).collect(Collectors.joining(", "))); } } } private Node nodeFromHostname(String hostname) { return nodeRepository.nodes().node(hostname).orElseThrow(() -> new NotFoundException("No node found with hostname " + hostname)); } public int addNodes(Inspector inspector) { List<Node> nodes = createNodesFromSlime(inspector); return nodeRepository.nodes().addNodes(nodes, Agent.operator).size(); } private Inspector toSlime(HttpRequest request) { try { byte[] jsonBytes = IOUtils.readBytes(request.getData(), 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes).get(); } catch (IOException e) { throw new UncheckedIOException(e); } } private List<Node> createNodesFromSlime(Inspector object) { List<Node> nodes = new ArrayList<>(); object.traverse((ArrayTraverser) (int i, Inspector item) -> nodes.add(createNode(item))); return nodes; } private Node createNode(Inspector inspector) { Set<String> ipAddresses = new HashSet<>(); inspector.field("ipAddresses").traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); Set<String> ipAddressPool = new HashSet<>(); inspector.field("additionalIpAddresses").traverse((ArrayTraverser) (i, item) -> ipAddressPool.add(item.asString())); List<Address> addressPool = new ArrayList<>(); inspector.field("additionalHostnames").traverse((ArrayTraverser) (i, item) -> addressPool.add(new Address(item.asString()))); Node.Builder builder = 
Node.create(inspector.field("openStackId").asString(), IP.Config.of(ipAddresses, ipAddressPool, addressPool), inspector.field("hostname").asString(), flavorFromSlime(inspector), nodeTypeFromSlime(inspector.field("type"))); optionalString(inspector.field("parentHostname")).ifPresent(builder::parentHostname); optionalString(inspector.field("modelName")).ifPresent(builder::modelName); optionalString(inspector.field("reservedTo")).map(TenantName::from).ifPresent(builder::reservedTo); optionalString(inspector.field("exclusiveTo")).map(ApplicationId::fromSerializedForm).ifPresent(builder::exclusiveTo); optionalString(inspector.field("switchHostname")).ifPresent(builder::switchHostname); return builder.build(); } private Flavor flavorFromSlime(Inspector inspector) { Inspector flavorInspector = inspector.field("flavor"); Inspector resourcesInspector = inspector.field("resources"); if ( ! flavorInspector.valid()) { return new Flavor(new NodeResources( requiredField(resourcesInspector, "vcpu", Inspector::asDouble), requiredField(resourcesInspector, "memoryGb", Inspector::asDouble), requiredField(resourcesInspector, "diskGb", Inspector::asDouble), requiredField(resourcesInspector, "bandwidthGbps", Inspector::asDouble), optionalString(resourcesInspector.field("diskSpeed")).map(NodeResourcesSerializer::diskSpeedFrom).orElse(NodeResources.DiskSpeed.getDefault()), optionalString(resourcesInspector.field("storageType")).map(NodeResourcesSerializer::storageTypeFrom).orElse(NodeResources.StorageType.getDefault()))); } Flavor flavor = nodeFlavors.getFlavorOrThrow(flavorInspector.asString()); if (resourcesInspector.valid()) { if (resourcesInspector.field("vcpu").valid()) flavor = flavor.with(flavor.resources().withVcpu(resourcesInspector.field("vcpu").asDouble())); if (resourcesInspector.field("memoryGb").valid()) flavor = flavor.with(flavor.resources().withMemoryGb(resourcesInspector.field("memoryGb").asDouble())); if (resourcesInspector.field("diskGb").valid()) flavor = 
flavor.with(flavor.resources().withDiskGb(resourcesInspector.field("diskGb").asDouble())); if (resourcesInspector.field("bandwidthGbps").valid()) flavor = flavor.with(flavor.resources().withBandwidthGbps(resourcesInspector.field("bandwidthGbps").asDouble())); if (resourcesInspector.field("diskSpeed").valid()) flavor = flavor.with(flavor.resources().with(NodeResourcesSerializer.diskSpeedFrom(resourcesInspector.field("diskSpeed").asString()))); if (resourcesInspector.field("storageType").valid()) flavor = flavor.with(flavor.resources().with(NodeResourcesSerializer.storageTypeFrom(resourcesInspector.field("storageType").asString()))); } return flavor; } private static <T> T requiredField(Inspector inspector, String fieldName, Function<Inspector, T> valueExtractor) { Inspector field = inspector.field(fieldName); if (!field.valid()) throw new IllegalArgumentException("Required field '" + fieldName + "' is missing"); return valueExtractor.apply(field); } private NodeType nodeTypeFromSlime(Inspector object) { if (! 
object.valid()) return NodeType.tenant; return NodeSerializer.typeFrom(object.asString()); } public static NodeFilter toNodeFilter(HttpRequest request) { NodeFilter filter = NodeHostFilter.from(HostFilter.from(request.getProperty("hostname"), request.getProperty("flavor"), request.getProperty("clusterType"), request.getProperty("clusterId"))); filter = ApplicationFilter.from(request.getProperty("application"), filter); filter = StateFilter.from(request.getProperty("state"), request.getBooleanProperty("includeDeprovisioned"), filter); filter = NodeTypeFilter.from(request.getProperty("type"), filter); filter = ParentHostFilter.from(request.getProperty("parentHost"), filter); filter = NodeOsVersionFilter.from(request.getProperty("osVersion"), filter); return filter; } private static boolean isPatchOverride(HttpRequest request) { String override = request.getHeader("X-HTTP-Method-Override"); if (override != null) { if (override.equals("PATCH")) { return true; } else { String msg = String.format("Illegal X-HTTP-Method-Override header for POST request. 
Accepts 'PATCH' but got '%s'", override); throw new IllegalArgumentException(msg); } } return false; } private MessageResponse setTargetVersions(String nodeTypeS, Inspector inspector) { NodeType nodeType = NodeType.valueOf(nodeTypeS.toLowerCase()); List<String> messageParts = new ArrayList<>(4); boolean force = inspector.field("force").asBool(); Inspector versionField = inspector.field("version"); Inspector osVersionField = inspector.field("osVersion"); Inspector containerImageField = inspector.field("dockerImage"); Inspector upgradeBudgetField = inspector.field("upgradeBudget"); if (versionField.valid()) { Version version = Version.fromString(versionField.asString()); nodeRepository.infrastructureVersions().setTargetVersion(nodeType, version, force); messageParts.add("version to " + version.toFullString()); } if (osVersionField.valid()) { String v = osVersionField.asString(); if (v.isEmpty()) { nodeRepository.osVersions().removeTarget(nodeType); messageParts.add("osVersion to null"); } else { Version osVersion = Version.fromString(v); Optional<Duration> upgradeBudget = Optional.of(upgradeBudgetField) .filter(Inspector::valid) .map(Inspector::asString) .map(s -> { try { return Duration.parse(s); } catch (Exception e) { throw new IllegalArgumentException("Invalid duration '" + s + "'", e); } }); nodeRepository.osVersions().setTarget(nodeType, osVersion, upgradeBudget, force); messageParts.add("osVersion to " + osVersion.toFullString()); upgradeBudget.ifPresent(d -> messageParts.add("upgradeBudget to " + d)); } } if (containerImageField.valid()) { Optional<DockerImage> dockerImage = Optional.of(containerImageField.asString()) .filter(s -> !s.isEmpty()) .map(DockerImage::fromString); nodeRepository.containerImages().setImage(nodeType, dockerImage); messageParts.add("container image to " + dockerImage.map(DockerImage::asString).orElse(null)); } if (messageParts.isEmpty()) { throw new IllegalArgumentException("At least one of 'version', 'osVersion' or 'dockerImage' must 
be set"); } return new MessageResponse("Set " + String.join(", ", messageParts) + " for nodes of type " + nodeType); } private MessageResponse cancelFirmwareCheckResponse() { nodeRepository.firmwareChecks().cancel(); return new MessageResponse("Cancelled outstanding requests for firmware checks"); } private MessageResponse requestFirmwareCheckResponse() { nodeRepository.firmwareChecks().request(); return new MessageResponse("Will request firmware checks on all hosts."); } private HttpResponse setTenantArchiveUri(String tenant, Optional<String> archiveUri) { nodeRepository.archiveUris().setArchiveUri(TenantName.from(tenant), archiveUri); return new MessageResponse(archiveUri.map(a -> "Updated").orElse("Removed") + " archive URI for " + tenant); } private static String hostnamesAsString(List<Node> nodes) { return nodes.stream().map(Node::hostname).sorted().collect(Collectors.joining(", ")); } private HttpResponse applicationList(URI uri) { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor applications = root.setArray("applications"); for (ApplicationId id : nodeRepository.applications().ids()) { Cursor application = applications.addObject(); application.setString("url", withPath("/nodes/v2/application/" + id.toFullString(), uri).toString()); application.setString("id", id.toFullString()); } return new SlimeJsonResponse(slime); } private HttpResponse application(String idString, URI uri) { ApplicationId id = ApplicationId.fromFullString(idString); Optional<Application> application = nodeRepository.applications().get(id); if (application.isEmpty()) return ErrorResponse.notFoundError("No application '" + id + "'"); Slime slime = ApplicationSerializer.toSlime(application.get(), nodeRepository.nodes().list(Node.State.active).owner(id), metricsDb, nodeRepository, withPath("/nodes/v2/applications/" + id, uri)); return new SlimeJsonResponse(slime); } private void toSlime(Load load, Cursor object) { object.setDouble("cpu", load.cpu()); 
object.setDouble("memory", load.memory()); object.setDouble("disk", load.disk()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } }
Rename the `applications` array cursor from `applicationsObject` to `applicationsArray`, since `root.setArray(...)` returns a Slime array, not an object.
/**
 * Serializes aggregate node repository statistics to JSON:
 * the overall load, the load over active nodes only, and per-application
 * statistics for at most the first 6 entries of {@code stats.applicationStats()}.
 *
 * @return a JSON response with "load", "activeLoad" and "applications" fields
 */
private HttpResponse stats() {
    var stats = nodeRepository.computeStats();
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(stats.load(), root.setObject("load"));
    // Bug fix: the original passed stats.load() here as well, so the "activeLoad"
    // object silently duplicated "load" and the active-node load was never reported.
    toSlime(stats.activeLoad(), root.setObject("activeLoad"));
    Cursor applicationsArray = root.setArray("applications"); // setArray -> name it as an array
    // Report at most the first 6 applications (indexes 0..5), same cap as before,
    // but expressed in the loop condition instead of a break inside the body.
    for (int i = 0; i <= 5 && i < stats.applicationStats().size(); i++) {
        var applicationStats = stats.applicationStats().get(i);
        Cursor applicationObject = applicationsArray.addObject();
        applicationObject.setString("id", applicationStats.id().toFullString());
        toSlime(applicationStats.load(), applicationObject.setObject("load"));
        applicationObject.setDouble("cost", applicationStats.cost());
        applicationObject.setDouble("unutilizedCost", applicationStats.unutilizedCost());
    }
    return new SlimeJsonResponse(slime);
}
Cursor applicationsObject = root.setArray("applications");
/**
 * Builds the /nodes/v2/stats JSON response: total load, load over active
 * nodes, and statistics for up to the first 6 applications.
 */
private HttpResponse stats() {
    var stats = nodeRepository.computeStats();
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(stats.load(), root.setObject("load"));
    toSlime(stats.activeLoad(), root.setObject("activeLoad"));
    Cursor applicationsArray = root.setArray("applications");
    // Cap at 6 applications (the original loop ran i = 0..5, breaking early
    // when the list was shorter); same bound, precomputed here.
    int count = Math.min(6, stats.applicationStats().size());
    for (int index = 0; index < count; index++) {
        var appStats = stats.applicationStats().get(index);
        Cursor appObject = applicationsArray.addObject();
        appObject.setString("id", appStats.id().toFullString());
        toSlime(appStats.load(), appObject.setObject("load"));
        appObject.setDouble("cost", appStats.cost());
        appObject.setDouble("unutilizedCost", appStats.unutilizedCost());
    }
    return new SlimeJsonResponse(slime);
}
class NodesV2ApiHandler extends LoggingRequestHandler { private final Orchestrator orchestrator; private final NodeRepository nodeRepository; private final MetricsDb metricsDb; private final NodeFlavors nodeFlavors; @Inject public NodesV2ApiHandler(LoggingRequestHandler.Context parentCtx, Orchestrator orchestrator, NodeRepository nodeRepository, MetricsDb metricsDb, NodeFlavors flavors) { super(parentCtx); this.orchestrator = orchestrator; this.nodeRepository = nodeRepository; this.metricsDb = metricsDb; this.nodeFlavors = flavors; } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return isPatchOverride(request) ? handlePATCH(request) : handlePOST(request); case DELETE: return handleDELETE(request); case PATCH: return handlePATCH(request); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (NotFoundException | NoSuchNodeException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri()); String pathS = request.getUri().getPath(); if (path.matches( "/nodes/v2")) return new ResourceResponse(request.getUri(), "node", "state", "acl", "command", "archive", "locks", "maintenance", "upgrade", "capacity", "application", "stats"); if (path.matches( "/nodes/v2/node")) return new NodesResponse(ResponseType.nodeList, request, orchestrator, nodeRepository); if (pathS.startsWith("/nodes/v2/node/")) return new NodesResponse(ResponseType.singleNode, request, orchestrator, nodeRepository); if 
(path.matches( "/nodes/v2/state")) return new NodesResponse(ResponseType.stateList, request, orchestrator, nodeRepository); if (pathS.startsWith("/nodes/v2/state/")) return new NodesResponse(ResponseType.nodesInStateList, request, orchestrator, nodeRepository); if (path.matches( "/nodes/v2/acl/{hostname}")) return new NodeAclResponse(request, nodeRepository, path.get("hostname")); if (path.matches( "/nodes/v2/command")) return new ResourceResponse(request.getUri(), "restart", "reboot"); if (path.matches( "/nodes/v2/archive")) return new ArchiveResponse(nodeRepository); if (path.matches( "/nodes/v2/locks")) return new LocksResponse(); if (path.matches( "/nodes/v2/maintenance")) return new JobsResponse(nodeRepository.jobControl()); if (path.matches( "/nodes/v2/upgrade")) return new UpgradeResponse(nodeRepository.infrastructureVersions(), nodeRepository.osVersions(), nodeRepository.containerImages()); if (path.matches( "/nodes/v2/capacity")) return new HostCapacityResponse(nodeRepository, request); if (path.matches( "/nodes/v2/application")) return applicationList(request.getUri()); if (path.matches( "/nodes/v2/application/{applicationId}")) return application(path.get("applicationId"), request.getUri()); if (path.matches( "/nodes/v2/stats")) return stats(); throw new NotFoundException("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/state/ready/{hostname}")) { nodeRepository.nodes().markNodeAvailableForNewAllocation(path.get("hostname"), Agent.operator, "Readied through the nodes/v2 API"); return new MessageResponse("Moved " + path.get("hostname") + " to ready"); } else if (path.matches("/nodes/v2/state/failed/{hostname}")) { List<Node> failedNodes = nodeRepository.nodes().failRecursively(path.get("hostname"), Agent.operator, "Failed through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(failedNodes) + " to failed"); } else if 
(path.matches("/nodes/v2/state/parked/{hostname}")) { List<Node> parkedNodes = nodeRepository.nodes().parkRecursively(path.get("hostname"), Agent.operator, "Parked through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(parkedNodes) + " to parked"); } else if (path.matches("/nodes/v2/state/dirty/{hostname}")) { List<Node> dirtiedNodes = nodeRepository.nodes().deallocateRecursively(path.get("hostname"), Agent.operator, "Dirtied through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(dirtiedNodes) + " to dirty"); } else if (path.matches("/nodes/v2/state/active/{hostname}")) { nodeRepository.nodes().reactivate(path.get("hostname"), Agent.operator, "Reactivated through nodes/v2 API"); return new MessageResponse("Moved " + path.get("hostname") + " to active"); } else if (path.matches("/nodes/v2/state/breakfixed/{hostname}")) { List<Node> breakfixedNodes = nodeRepository.nodes().breakfixRecursively(path.get("hostname"), Agent.operator, "Breakfixed through the nodes/v2 API"); return new MessageResponse("Breakfixed " + hostnamesAsString(breakfixedNodes)); } throw new NotFoundException("Cannot put to path '" + path + "'"); } private HttpResponse handlePATCH(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/node/{hostname}")) { try (NodePatcher patcher = new NodePatcher(nodeFlavors, request.getData(), nodeFromHostname(path.get("hostname")), nodeRepository)) { var patchedNodes = patcher.apply(); nodeRepository.nodes().write(patchedNodes, patcher.nodeMutexOfHost()); return new MessageResponse("Updated " + patcher.nodeMutexOfHost().node().hostname()); } } else if (path.matches("/nodes/v2/application/{applicationId}")) { try (ApplicationPatcher patcher = new ApplicationPatcher(request.getData(), ApplicationId.fromFullString(path.get("applicationId")), nodeRepository)) { nodeRepository.applications().put(patcher.apply(), patcher.lock()); return new MessageResponse("Updated " + 
patcher.application()); } } else if (path.matches("/nodes/v2/archive/{tenant}")) { String uri = requiredField(toSlime(request), "uri", Inspector::asString); return setTenantArchiveUri(path.get("tenant"), Optional.of(uri)); } else if (path.matches("/nodes/v2/upgrade/{nodeType}")) { return setTargetVersions(path.get("nodeType"), toSlime(request)); } throw new NotFoundException("Nothing at '" + path + "'"); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/command/restart")) { int restartCount = nodeRepository.nodes().restart(toNodeFilter(request)).size(); return new MessageResponse("Scheduled restart of " + restartCount + " matching nodes"); } if (path.matches("/nodes/v2/command/reboot")) { int rebootCount = nodeRepository.nodes().reboot(toNodeFilter(request)).size(); return new MessageResponse("Scheduled reboot of " + rebootCount + " matching nodes"); } if (path.matches("/nodes/v2/node")) { int addedNodes = addNodes(toSlime(request)); return new MessageResponse("Added " + addedNodes + " nodes to the provisioned state"); } if (path.matches("/nodes/v2/maintenance/run/{job}")) return runJob(path.get("job")); if (path.matches("/nodes/v2/upgrade/firmware")) return requestFirmwareCheckResponse(); throw new NotFoundException("Nothing at path '" + request.getUri().getPath() + "'"); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/node/{hostname}")) return deleteNode(path.get("hostname")); if (path.matches("/nodes/v2/archive/{tenant}")) return setTenantArchiveUri(path.get("tenant"), Optional.empty()); if (path.matches("/nodes/v2/upgrade/firmware")) return cancelFirmwareCheckResponse(); throw new NotFoundException("Nothing at path '" + request.getUri().getPath() + "'"); } private HttpResponse runJob(String job) { nodeRepository.jobControl().run(job); return new MessageResponse("Executed job '" + job + "'"); } private 
HttpResponse deleteNode(String hostname) { Optional<NodeMutex> nodeMutex = nodeRepository.nodes().lockAndGet(hostname); if (nodeMutex.isEmpty()) throw new NotFoundException("No node with hostname '" + hostname + "'"); try (var lock = nodeMutex.get()) { if (lock.node().state() == Node.State.deprovisioned) { nodeRepository.nodes().forget(lock.node()); return new MessageResponse("Permanently removed " + hostname); } else { List<Node> removedNodes = nodeRepository.nodes().removeRecursively(hostname); return new MessageResponse("Removed " + removedNodes.stream().map(Node::hostname).collect(Collectors.joining(", "))); } } } private Node nodeFromHostname(String hostname) { return nodeRepository.nodes().node(hostname).orElseThrow(() -> new NotFoundException("No node found with hostname " + hostname)); } public int addNodes(Inspector inspector) { List<Node> nodes = createNodesFromSlime(inspector); return nodeRepository.nodes().addNodes(nodes, Agent.operator).size(); } private Inspector toSlime(HttpRequest request) { try { byte[] jsonBytes = IOUtils.readBytes(request.getData(), 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes).get(); } catch (IOException e) { throw new UncheckedIOException(e); } } private List<Node> createNodesFromSlime(Inspector object) { List<Node> nodes = new ArrayList<>(); object.traverse((ArrayTraverser) (int i, Inspector item) -> nodes.add(createNode(item))); return nodes; } private Node createNode(Inspector inspector) { Set<String> ipAddresses = new HashSet<>(); inspector.field("ipAddresses").traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); Set<String> ipAddressPool = new HashSet<>(); inspector.field("additionalIpAddresses").traverse((ArrayTraverser) (i, item) -> ipAddressPool.add(item.asString())); List<Address> addressPool = new ArrayList<>(); inspector.field("additionalHostnames").traverse((ArrayTraverser) (i, item) -> addressPool.add(new Address(item.asString()))); Node.Builder builder = 
Node.create(inspector.field("openStackId").asString(), IP.Config.of(ipAddresses, ipAddressPool, addressPool), inspector.field("hostname").asString(), flavorFromSlime(inspector), nodeTypeFromSlime(inspector.field("type"))); optionalString(inspector.field("parentHostname")).ifPresent(builder::parentHostname); optionalString(inspector.field("modelName")).ifPresent(builder::modelName); optionalString(inspector.field("reservedTo")).map(TenantName::from).ifPresent(builder::reservedTo); optionalString(inspector.field("exclusiveTo")).map(ApplicationId::fromSerializedForm).ifPresent(builder::exclusiveTo); optionalString(inspector.field("switchHostname")).ifPresent(builder::switchHostname); return builder.build(); } private Flavor flavorFromSlime(Inspector inspector) { Inspector flavorInspector = inspector.field("flavor"); Inspector resourcesInspector = inspector.field("resources"); if ( ! flavorInspector.valid()) { return new Flavor(new NodeResources( requiredField(resourcesInspector, "vcpu", Inspector::asDouble), requiredField(resourcesInspector, "memoryGb", Inspector::asDouble), requiredField(resourcesInspector, "diskGb", Inspector::asDouble), requiredField(resourcesInspector, "bandwidthGbps", Inspector::asDouble), optionalString(resourcesInspector.field("diskSpeed")).map(NodeResourcesSerializer::diskSpeedFrom).orElse(NodeResources.DiskSpeed.getDefault()), optionalString(resourcesInspector.field("storageType")).map(NodeResourcesSerializer::storageTypeFrom).orElse(NodeResources.StorageType.getDefault()))); } Flavor flavor = nodeFlavors.getFlavorOrThrow(flavorInspector.asString()); if (resourcesInspector.valid()) { if (resourcesInspector.field("vcpu").valid()) flavor = flavor.with(flavor.resources().withVcpu(resourcesInspector.field("vcpu").asDouble())); if (resourcesInspector.field("memoryGb").valid()) flavor = flavor.with(flavor.resources().withMemoryGb(resourcesInspector.field("memoryGb").asDouble())); if (resourcesInspector.field("diskGb").valid()) flavor = 
flavor.with(flavor.resources().withDiskGb(resourcesInspector.field("diskGb").asDouble())); if (resourcesInspector.field("bandwidthGbps").valid()) flavor = flavor.with(flavor.resources().withBandwidthGbps(resourcesInspector.field("bandwidthGbps").asDouble())); if (resourcesInspector.field("diskSpeed").valid()) flavor = flavor.with(flavor.resources().with(NodeResourcesSerializer.diskSpeedFrom(resourcesInspector.field("diskSpeed").asString()))); if (resourcesInspector.field("storageType").valid()) flavor = flavor.with(flavor.resources().with(NodeResourcesSerializer.storageTypeFrom(resourcesInspector.field("storageType").asString()))); } return flavor; } private static <T> T requiredField(Inspector inspector, String fieldName, Function<Inspector, T> valueExtractor) { Inspector field = inspector.field(fieldName); if (!field.valid()) throw new IllegalArgumentException("Required field '" + fieldName + "' is missing"); return valueExtractor.apply(field); } private NodeType nodeTypeFromSlime(Inspector object) { if (! 
object.valid()) return NodeType.tenant; return NodeSerializer.typeFrom(object.asString()); } public static NodeFilter toNodeFilter(HttpRequest request) { NodeFilter filter = NodeHostFilter.from(HostFilter.from(request.getProperty("hostname"), request.getProperty("flavor"), request.getProperty("clusterType"), request.getProperty("clusterId"))); filter = ApplicationFilter.from(request.getProperty("application"), filter); filter = StateFilter.from(request.getProperty("state"), request.getBooleanProperty("includeDeprovisioned"), filter); filter = NodeTypeFilter.from(request.getProperty("type"), filter); filter = ParentHostFilter.from(request.getProperty("parentHost"), filter); filter = NodeOsVersionFilter.from(request.getProperty("osVersion"), filter); return filter; } private static boolean isPatchOverride(HttpRequest request) { String override = request.getHeader("X-HTTP-Method-Override"); if (override != null) { if (override.equals("PATCH")) { return true; } else { String msg = String.format("Illegal X-HTTP-Method-Override header for POST request. 
Accepts 'PATCH' but got '%s'", override); throw new IllegalArgumentException(msg); } } return false; } private MessageResponse setTargetVersions(String nodeTypeS, Inspector inspector) { NodeType nodeType = NodeType.valueOf(nodeTypeS.toLowerCase()); List<String> messageParts = new ArrayList<>(4); boolean force = inspector.field("force").asBool(); Inspector versionField = inspector.field("version"); Inspector osVersionField = inspector.field("osVersion"); Inspector containerImageField = inspector.field("dockerImage"); Inspector upgradeBudgetField = inspector.field("upgradeBudget"); if (versionField.valid()) { Version version = Version.fromString(versionField.asString()); nodeRepository.infrastructureVersions().setTargetVersion(nodeType, version, force); messageParts.add("version to " + version.toFullString()); } if (osVersionField.valid()) { String v = osVersionField.asString(); if (v.isEmpty()) { nodeRepository.osVersions().removeTarget(nodeType); messageParts.add("osVersion to null"); } else { Version osVersion = Version.fromString(v); Optional<Duration> upgradeBudget = Optional.of(upgradeBudgetField) .filter(Inspector::valid) .map(Inspector::asString) .map(s -> { try { return Duration.parse(s); } catch (Exception e) { throw new IllegalArgumentException("Invalid duration '" + s + "'", e); } }); nodeRepository.osVersions().setTarget(nodeType, osVersion, upgradeBudget, force); messageParts.add("osVersion to " + osVersion.toFullString()); upgradeBudget.ifPresent(d -> messageParts.add("upgradeBudget to " + d)); } } if (containerImageField.valid()) { Optional<DockerImage> dockerImage = Optional.of(containerImageField.asString()) .filter(s -> !s.isEmpty()) .map(DockerImage::fromString); nodeRepository.containerImages().setImage(nodeType, dockerImage); messageParts.add("container image to " + dockerImage.map(DockerImage::asString).orElse(null)); } if (messageParts.isEmpty()) { throw new IllegalArgumentException("At least one of 'version', 'osVersion' or 'dockerImage' must 
be set"); } return new MessageResponse("Set " + String.join(", ", messageParts) + " for nodes of type " + nodeType); } private MessageResponse cancelFirmwareCheckResponse() { nodeRepository.firmwareChecks().cancel(); return new MessageResponse("Cancelled outstanding requests for firmware checks"); } private MessageResponse requestFirmwareCheckResponse() { nodeRepository.firmwareChecks().request(); return new MessageResponse("Will request firmware checks on all hosts."); } private HttpResponse setTenantArchiveUri(String tenant, Optional<String> archiveUri) { nodeRepository.archiveUris().setArchiveUri(TenantName.from(tenant), archiveUri); return new MessageResponse(archiveUri.map(a -> "Updated").orElse("Removed") + " archive URI for " + tenant); } private static String hostnamesAsString(List<Node> nodes) { return nodes.stream().map(Node::hostname).sorted().collect(Collectors.joining(", ")); } private HttpResponse applicationList(URI uri) { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor applications = root.setArray("applications"); for (ApplicationId id : nodeRepository.applications().ids()) { Cursor application = applications.addObject(); application.setString("url", withPath("/nodes/v2/application/" + id.toFullString(), uri).toString()); application.setString("id", id.toFullString()); } return new SlimeJsonResponse(slime); } private HttpResponse application(String idString, URI uri) { ApplicationId id = ApplicationId.fromFullString(idString); Optional<Application> application = nodeRepository.applications().get(id); if (application.isEmpty()) return ErrorResponse.notFoundError("No application '" + id + "'"); Slime slime = ApplicationSerializer.toSlime(application.get(), nodeRepository.nodes().list(Node.State.active).owner(id), metricsDb, nodeRepository, withPath("/nodes/v2/applications/" + id, uri)); return new SlimeJsonResponse(slime); } private void toSlime(Load load, Cursor object) { object.setDouble("cpu", load.cpu()); 
object.setDouble("memory", load.memory()); object.setDouble("disk", load.disk()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } }
class NodesV2ApiHandler extends LoggingRequestHandler { private final Orchestrator orchestrator; private final NodeRepository nodeRepository; private final MetricsDb metricsDb; private final NodeFlavors nodeFlavors; @Inject public NodesV2ApiHandler(LoggingRequestHandler.Context parentCtx, Orchestrator orchestrator, NodeRepository nodeRepository, MetricsDb metricsDb, NodeFlavors flavors) { super(parentCtx); this.orchestrator = orchestrator; this.nodeRepository = nodeRepository; this.metricsDb = metricsDb; this.nodeFlavors = flavors; } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return isPatchOverride(request) ? handlePATCH(request) : handlePOST(request); case DELETE: return handleDELETE(request); case PATCH: return handlePATCH(request); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (NotFoundException | NoSuchNodeException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri()); String pathS = request.getUri().getPath(); if (path.matches( "/nodes/v2")) return new ResourceResponse(request.getUri(), "node", "state", "acl", "command", "archive", "locks", "maintenance", "upgrade", "capacity", "application", "stats"); if (path.matches( "/nodes/v2/node")) return new NodesResponse(ResponseType.nodeList, request, orchestrator, nodeRepository); if (pathS.startsWith("/nodes/v2/node/")) return new NodesResponse(ResponseType.singleNode, request, orchestrator, nodeRepository); if 
(path.matches( "/nodes/v2/state")) return new NodesResponse(ResponseType.stateList, request, orchestrator, nodeRepository); if (pathS.startsWith("/nodes/v2/state/")) return new NodesResponse(ResponseType.nodesInStateList, request, orchestrator, nodeRepository); if (path.matches( "/nodes/v2/acl/{hostname}")) return new NodeAclResponse(request, nodeRepository, path.get("hostname")); if (path.matches( "/nodes/v2/command")) return new ResourceResponse(request.getUri(), "restart", "reboot"); if (path.matches( "/nodes/v2/archive")) return new ArchiveResponse(nodeRepository); if (path.matches( "/nodes/v2/locks")) return new LocksResponse(); if (path.matches( "/nodes/v2/maintenance")) return new JobsResponse(nodeRepository.jobControl()); if (path.matches( "/nodes/v2/upgrade")) return new UpgradeResponse(nodeRepository.infrastructureVersions(), nodeRepository.osVersions(), nodeRepository.containerImages()); if (path.matches( "/nodes/v2/capacity")) return new HostCapacityResponse(nodeRepository, request); if (path.matches( "/nodes/v2/application")) return applicationList(request.getUri()); if (path.matches( "/nodes/v2/application/{applicationId}")) return application(path.get("applicationId"), request.getUri()); if (path.matches( "/nodes/v2/stats")) return stats(); throw new NotFoundException("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/state/ready/{hostname}")) { nodeRepository.nodes().markNodeAvailableForNewAllocation(path.get("hostname"), Agent.operator, "Readied through the nodes/v2 API"); return new MessageResponse("Moved " + path.get("hostname") + " to ready"); } else if (path.matches("/nodes/v2/state/failed/{hostname}")) { List<Node> failedNodes = nodeRepository.nodes().failRecursively(path.get("hostname"), Agent.operator, "Failed through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(failedNodes) + " to failed"); } else if 
(path.matches("/nodes/v2/state/parked/{hostname}")) { List<Node> parkedNodes = nodeRepository.nodes().parkRecursively(path.get("hostname"), Agent.operator, "Parked through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(parkedNodes) + " to parked"); } else if (path.matches("/nodes/v2/state/dirty/{hostname}")) { List<Node> dirtiedNodes = nodeRepository.nodes().deallocateRecursively(path.get("hostname"), Agent.operator, "Dirtied through the nodes/v2 API"); return new MessageResponse("Moved " + hostnamesAsString(dirtiedNodes) + " to dirty"); } else if (path.matches("/nodes/v2/state/active/{hostname}")) { nodeRepository.nodes().reactivate(path.get("hostname"), Agent.operator, "Reactivated through nodes/v2 API"); return new MessageResponse("Moved " + path.get("hostname") + " to active"); } else if (path.matches("/nodes/v2/state/breakfixed/{hostname}")) { List<Node> breakfixedNodes = nodeRepository.nodes().breakfixRecursively(path.get("hostname"), Agent.operator, "Breakfixed through the nodes/v2 API"); return new MessageResponse("Breakfixed " + hostnamesAsString(breakfixedNodes)); } throw new NotFoundException("Cannot put to path '" + path + "'"); } private HttpResponse handlePATCH(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/node/{hostname}")) { try (NodePatcher patcher = new NodePatcher(nodeFlavors, request.getData(), nodeFromHostname(path.get("hostname")), nodeRepository)) { var patchedNodes = patcher.apply(); nodeRepository.nodes().write(patchedNodes, patcher.nodeMutexOfHost()); return new MessageResponse("Updated " + patcher.nodeMutexOfHost().node().hostname()); } } else if (path.matches("/nodes/v2/application/{applicationId}")) { try (ApplicationPatcher patcher = new ApplicationPatcher(request.getData(), ApplicationId.fromFullString(path.get("applicationId")), nodeRepository)) { nodeRepository.applications().put(patcher.apply(), patcher.lock()); return new MessageResponse("Updated " + 
patcher.application()); } } else if (path.matches("/nodes/v2/archive/{tenant}")) { String uri = requiredField(toSlime(request), "uri", Inspector::asString); return setTenantArchiveUri(path.get("tenant"), Optional.of(uri)); } else if (path.matches("/nodes/v2/upgrade/{nodeType}")) { return setTargetVersions(path.get("nodeType"), toSlime(request)); } throw new NotFoundException("Nothing at '" + path + "'"); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/command/restart")) { int restartCount = nodeRepository.nodes().restart(toNodeFilter(request)).size(); return new MessageResponse("Scheduled restart of " + restartCount + " matching nodes"); } if (path.matches("/nodes/v2/command/reboot")) { int rebootCount = nodeRepository.nodes().reboot(toNodeFilter(request)).size(); return new MessageResponse("Scheduled reboot of " + rebootCount + " matching nodes"); } if (path.matches("/nodes/v2/node")) { int addedNodes = addNodes(toSlime(request)); return new MessageResponse("Added " + addedNodes + " nodes to the provisioned state"); } if (path.matches("/nodes/v2/maintenance/run/{job}")) return runJob(path.get("job")); if (path.matches("/nodes/v2/upgrade/firmware")) return requestFirmwareCheckResponse(); throw new NotFoundException("Nothing at path '" + request.getUri().getPath() + "'"); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri()); if (path.matches("/nodes/v2/node/{hostname}")) return deleteNode(path.get("hostname")); if (path.matches("/nodes/v2/archive/{tenant}")) return setTenantArchiveUri(path.get("tenant"), Optional.empty()); if (path.matches("/nodes/v2/upgrade/firmware")) return cancelFirmwareCheckResponse(); throw new NotFoundException("Nothing at path '" + request.getUri().getPath() + "'"); } private HttpResponse runJob(String job) { nodeRepository.jobControl().run(job); return new MessageResponse("Executed job '" + job + "'"); } private 
HttpResponse deleteNode(String hostname) { Optional<NodeMutex> nodeMutex = nodeRepository.nodes().lockAndGet(hostname); if (nodeMutex.isEmpty()) throw new NotFoundException("No node with hostname '" + hostname + "'"); try (var lock = nodeMutex.get()) { if (lock.node().state() == Node.State.deprovisioned) { nodeRepository.nodes().forget(lock.node()); return new MessageResponse("Permanently removed " + hostname); } else { List<Node> removedNodes = nodeRepository.nodes().removeRecursively(hostname); return new MessageResponse("Removed " + removedNodes.stream().map(Node::hostname).collect(Collectors.joining(", "))); } } } private Node nodeFromHostname(String hostname) { return nodeRepository.nodes().node(hostname).orElseThrow(() -> new NotFoundException("No node found with hostname " + hostname)); } public int addNodes(Inspector inspector) { List<Node> nodes = createNodesFromSlime(inspector); return nodeRepository.nodes().addNodes(nodes, Agent.operator).size(); } private Inspector toSlime(HttpRequest request) { try { byte[] jsonBytes = IOUtils.readBytes(request.getData(), 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes).get(); } catch (IOException e) { throw new UncheckedIOException(e); } } private List<Node> createNodesFromSlime(Inspector object) { List<Node> nodes = new ArrayList<>(); object.traverse((ArrayTraverser) (int i, Inspector item) -> nodes.add(createNode(item))); return nodes; } private Node createNode(Inspector inspector) { Set<String> ipAddresses = new HashSet<>(); inspector.field("ipAddresses").traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); Set<String> ipAddressPool = new HashSet<>(); inspector.field("additionalIpAddresses").traverse((ArrayTraverser) (i, item) -> ipAddressPool.add(item.asString())); List<Address> addressPool = new ArrayList<>(); inspector.field("additionalHostnames").traverse((ArrayTraverser) (i, item) -> addressPool.add(new Address(item.asString()))); Node.Builder builder = 
Node.create(inspector.field("openStackId").asString(), IP.Config.of(ipAddresses, ipAddressPool, addressPool), inspector.field("hostname").asString(), flavorFromSlime(inspector), nodeTypeFromSlime(inspector.field("type"))); optionalString(inspector.field("parentHostname")).ifPresent(builder::parentHostname); optionalString(inspector.field("modelName")).ifPresent(builder::modelName); optionalString(inspector.field("reservedTo")).map(TenantName::from).ifPresent(builder::reservedTo); optionalString(inspector.field("exclusiveTo")).map(ApplicationId::fromSerializedForm).ifPresent(builder::exclusiveTo); optionalString(inspector.field("switchHostname")).ifPresent(builder::switchHostname); return builder.build(); } private Flavor flavorFromSlime(Inspector inspector) { Inspector flavorInspector = inspector.field("flavor"); Inspector resourcesInspector = inspector.field("resources"); if ( ! flavorInspector.valid()) { return new Flavor(new NodeResources( requiredField(resourcesInspector, "vcpu", Inspector::asDouble), requiredField(resourcesInspector, "memoryGb", Inspector::asDouble), requiredField(resourcesInspector, "diskGb", Inspector::asDouble), requiredField(resourcesInspector, "bandwidthGbps", Inspector::asDouble), optionalString(resourcesInspector.field("diskSpeed")).map(NodeResourcesSerializer::diskSpeedFrom).orElse(NodeResources.DiskSpeed.getDefault()), optionalString(resourcesInspector.field("storageType")).map(NodeResourcesSerializer::storageTypeFrom).orElse(NodeResources.StorageType.getDefault()))); } Flavor flavor = nodeFlavors.getFlavorOrThrow(flavorInspector.asString()); if (resourcesInspector.valid()) { if (resourcesInspector.field("vcpu").valid()) flavor = flavor.with(flavor.resources().withVcpu(resourcesInspector.field("vcpu").asDouble())); if (resourcesInspector.field("memoryGb").valid()) flavor = flavor.with(flavor.resources().withMemoryGb(resourcesInspector.field("memoryGb").asDouble())); if (resourcesInspector.field("diskGb").valid()) flavor = 
flavor.with(flavor.resources().withDiskGb(resourcesInspector.field("diskGb").asDouble())); if (resourcesInspector.field("bandwidthGbps").valid()) flavor = flavor.with(flavor.resources().withBandwidthGbps(resourcesInspector.field("bandwidthGbps").asDouble())); if (resourcesInspector.field("diskSpeed").valid()) flavor = flavor.with(flavor.resources().with(NodeResourcesSerializer.diskSpeedFrom(resourcesInspector.field("diskSpeed").asString()))); if (resourcesInspector.field("storageType").valid()) flavor = flavor.with(flavor.resources().with(NodeResourcesSerializer.storageTypeFrom(resourcesInspector.field("storageType").asString()))); } return flavor; } private static <T> T requiredField(Inspector inspector, String fieldName, Function<Inspector, T> valueExtractor) { Inspector field = inspector.field(fieldName); if (!field.valid()) throw new IllegalArgumentException("Required field '" + fieldName + "' is missing"); return valueExtractor.apply(field); } private NodeType nodeTypeFromSlime(Inspector object) { if (! 
object.valid()) return NodeType.tenant; return NodeSerializer.typeFrom(object.asString()); } public static NodeFilter toNodeFilter(HttpRequest request) { NodeFilter filter = NodeHostFilter.from(HostFilter.from(request.getProperty("hostname"), request.getProperty("flavor"), request.getProperty("clusterType"), request.getProperty("clusterId"))); filter = ApplicationFilter.from(request.getProperty("application"), filter); filter = StateFilter.from(request.getProperty("state"), request.getBooleanProperty("includeDeprovisioned"), filter); filter = NodeTypeFilter.from(request.getProperty("type"), filter); filter = ParentHostFilter.from(request.getProperty("parentHost"), filter); filter = NodeOsVersionFilter.from(request.getProperty("osVersion"), filter); return filter; } private static boolean isPatchOverride(HttpRequest request) { String override = request.getHeader("X-HTTP-Method-Override"); if (override != null) { if (override.equals("PATCH")) { return true; } else { String msg = String.format("Illegal X-HTTP-Method-Override header for POST request. 
Accepts 'PATCH' but got '%s'", override); throw new IllegalArgumentException(msg); } } return false; } private MessageResponse setTargetVersions(String nodeTypeS, Inspector inspector) { NodeType nodeType = NodeType.valueOf(nodeTypeS.toLowerCase()); List<String> messageParts = new ArrayList<>(4); boolean force = inspector.field("force").asBool(); Inspector versionField = inspector.field("version"); Inspector osVersionField = inspector.field("osVersion"); Inspector containerImageField = inspector.field("dockerImage"); Inspector upgradeBudgetField = inspector.field("upgradeBudget"); if (versionField.valid()) { Version version = Version.fromString(versionField.asString()); nodeRepository.infrastructureVersions().setTargetVersion(nodeType, version, force); messageParts.add("version to " + version.toFullString()); } if (osVersionField.valid()) { String v = osVersionField.asString(); if (v.isEmpty()) { nodeRepository.osVersions().removeTarget(nodeType); messageParts.add("osVersion to null"); } else { Version osVersion = Version.fromString(v); Optional<Duration> upgradeBudget = Optional.of(upgradeBudgetField) .filter(Inspector::valid) .map(Inspector::asString) .map(s -> { try { return Duration.parse(s); } catch (Exception e) { throw new IllegalArgumentException("Invalid duration '" + s + "'", e); } }); nodeRepository.osVersions().setTarget(nodeType, osVersion, upgradeBudget, force); messageParts.add("osVersion to " + osVersion.toFullString()); upgradeBudget.ifPresent(d -> messageParts.add("upgradeBudget to " + d)); } } if (containerImageField.valid()) { Optional<DockerImage> dockerImage = Optional.of(containerImageField.asString()) .filter(s -> !s.isEmpty()) .map(DockerImage::fromString); nodeRepository.containerImages().setImage(nodeType, dockerImage); messageParts.add("container image to " + dockerImage.map(DockerImage::asString).orElse(null)); } if (messageParts.isEmpty()) { throw new IllegalArgumentException("At least one of 'version', 'osVersion' or 'dockerImage' must 
be set"); } return new MessageResponse("Set " + String.join(", ", messageParts) + " for nodes of type " + nodeType); } private MessageResponse cancelFirmwareCheckResponse() { nodeRepository.firmwareChecks().cancel(); return new MessageResponse("Cancelled outstanding requests for firmware checks"); } private MessageResponse requestFirmwareCheckResponse() { nodeRepository.firmwareChecks().request(); return new MessageResponse("Will request firmware checks on all hosts."); } private HttpResponse setTenantArchiveUri(String tenant, Optional<String> archiveUri) { nodeRepository.archiveUris().setArchiveUri(TenantName.from(tenant), archiveUri); return new MessageResponse(archiveUri.map(a -> "Updated").orElse("Removed") + " archive URI for " + tenant); } private static String hostnamesAsString(List<Node> nodes) { return nodes.stream().map(Node::hostname).sorted().collect(Collectors.joining(", ")); } private HttpResponse applicationList(URI uri) { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor applications = root.setArray("applications"); for (ApplicationId id : nodeRepository.applications().ids()) { Cursor application = applications.addObject(); application.setString("url", withPath("/nodes/v2/application/" + id.toFullString(), uri).toString()); application.setString("id", id.toFullString()); } return new SlimeJsonResponse(slime); } private HttpResponse application(String idString, URI uri) { ApplicationId id = ApplicationId.fromFullString(idString); Optional<Application> application = nodeRepository.applications().get(id); if (application.isEmpty()) return ErrorResponse.notFoundError("No application '" + id + "'"); Slime slime = ApplicationSerializer.toSlime(application.get(), nodeRepository.nodes().list(Node.State.active).owner(id), metricsDb, nodeRepository, withPath("/nodes/v2/applications/" + id, uri)); return new SlimeJsonResponse(slime); } private void toSlime(Load load, Cursor object) { object.setDouble("cpu", load.cpu()); 
object.setDouble("memory", load.memory()); object.setDouble("disk", load.disk()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } }
reach ```suggestion for (ZoneId zone : controller.zoneRegistry().zones().reachable().ids()) { ```
private static Slime toSlime(Controller controller) { try { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor zonesArray = root.setArray("zones"); for (ZoneId zone : controller.zoneRegistry().zones().all().ids()) { NodeRepoStats stats = controller.serviceRegistry().configServer().nodeRepository().getStats(zone); if (stats.applicationStats().isEmpty()) continue; Cursor zoneObject = zonesArray.addObject(); zoneObject.setString("id", zone.toString()); toSlime(stats.load(), zoneObject.setObject("load")); toSlime(stats.load(), zoneObject.setObject("activeLoad")); Cursor applicationsArray = zoneObject.setArray("applications"); for (var applicationStats : stats.applicationStats()) toSlime(applicationStats, applicationsArray.addObject()); } return slime; } catch (Exception e) { e.printStackTrace(); throw e; } }
for (ZoneId zone : controller.zoneRegistry().zones().all().ids()) {
private static Slime toSlime(Controller controller) { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor zonesArray = root.setArray("zones"); for (ZoneId zone : controller.zoneRegistry().zones().reachable().ids()) { NodeRepoStats stats = controller.serviceRegistry().configServer().nodeRepository().getStats(zone); if (stats.applicationStats().isEmpty()) continue; Cursor zoneObject = zonesArray.addObject(); zoneObject.setString("id", zone.toString()); toSlime(stats.load(), zoneObject.setObject("load")); toSlime(stats.activeLoad(), zoneObject.setObject("activeLoad")); Cursor applicationsArray = zoneObject.setArray("applications"); for (var applicationStats : stats.applicationStats()) toSlime(applicationStats, applicationsArray.addObject()); } return slime; }
class StatsResponse extends SlimeJsonResponse { public StatsResponse(Controller controller) { super(toSlime(controller)); } private static void toSlime(ApplicationStats stats, Cursor applicationObject) { applicationObject.setString("id", stats.id().toFullString()); toSlime(stats.load(), applicationObject.setObject("load")); applicationObject.setDouble("cost", stats.cost()); applicationObject.setDouble("unutilizedCost", stats.unutilizedCost()); } private static void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } }
class StatsResponse extends SlimeJsonResponse { public StatsResponse(Controller controller) { super(toSlime(controller)); } private static void toSlime(ApplicationStats stats, Cursor applicationObject) { applicationObject.setString("id", stats.id().toFullString()); toSlime(stats.load(), applicationObject.setObject("load")); applicationObject.setDouble("cost", stats.cost()); applicationObject.setDouble("unutilizedCost", stats.unutilizedCost()); } private static void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } }
Yes, this was to the point :)
protected InvokerResult getSearchResult(Execution execution) throws IOException { InvokerResult result = new InvokerResult(query, query.getHits()); List<LeanHit> merged = Collections.emptyList(); long nextTimeout = query.getTimeLeft(); boolean extraDebug = (query.getOffset() == 0) && (query.getHits() == 7) && log.isLoggable(java.util.logging.Level.FINE); List<InvokerResult> processed = new ArrayList<>(); try { while (!invokers.isEmpty() && nextTimeout >= 0) { SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS); if (invoker == null) { log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received"); break; } else { InvokerResult toMerge = invoker.getSearchResult(execution); if (extraDebug) { processed.add(toMerge); } merged = mergeResult(result.getResult(), toMerge, merged); ejectInvoker(invoker); } nextTimeout = nextTimeout(); } } catch (InterruptedException e) { throw new RuntimeException("Interrupted while waiting for search results", e); } insertNetworkErrors(result.getResult()); result.getResult().setCoverage(createCoverage()); if (extraDebug && merged.size() > 0) { int firstPartId = merged.get(0).getPartId(); for (int index = 1; index < merged.size(); index++) { if (merged.get(index).getPartId() != firstPartId) { extraDebug = false; log.fine("merged["+index+"/"+merged.size()+"] from partId "+merged.get(index).getPartId()+", first "+firstPartId); break; } } } if (extraDebug) { log.fine("Interleaved "+processed.size()+" results"); for (int pIdx = 0; pIdx < processed.size(); ++pIdx) { var p = processed.get(pIdx); log.fine("InvokerResult "+pIdx+" total hits "+p.getResult().getTotalHitCount()); var lean = p.getLeanHits(); for (int idx = 0; idx < lean.size(); ++idx) { var hit = lean.get(idx); log.fine("lean hit "+idx+" relevance "+hit.getRelevance()+" partid "+hit.getPartId()); } } for (int mIdx = 0; mIdx < merged.size(); ++mIdx) { var hit = merged.get(mIdx); log.fine("merged hit 
"+mIdx+" relevance "+hit.getRelevance()+" partid "+hit.getPartId()); } } int needed = query.getOffset() + query.getHits(); for (int index = query.getOffset(); (index < merged.size()) && (index < needed); index++) { result.getLeanHits().add(merged.get(index)); } query.setOffset(0); return result; }
boolean extraDebug = (query.getOffset() == 0) && (query.getHits() == 7) && log.isLoggable(java.util.logging.Level.FINE);
protected InvokerResult getSearchResult(Execution execution) throws IOException { InvokerResult result = new InvokerResult(query, query.getHits()); List<LeanHit> merged = Collections.emptyList(); long nextTimeout = query.getTimeLeft(); boolean extraDebug = (query.getOffset() == 0) && (query.getHits() == 7) && log.isLoggable(java.util.logging.Level.FINE); List<InvokerResult> processed = new ArrayList<>(); try { while (!invokers.isEmpty() && nextTimeout >= 0) { SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS); if (invoker == null) { log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received"); break; } else { InvokerResult toMerge = invoker.getSearchResult(execution); if (extraDebug) { processed.add(toMerge); } merged = mergeResult(result.getResult(), toMerge, merged); ejectInvoker(invoker); } nextTimeout = nextTimeout(); } } catch (InterruptedException e) { throw new RuntimeException("Interrupted while waiting for search results", e); } insertNetworkErrors(result.getResult()); result.getResult().setCoverage(createCoverage()); if (extraDebug && merged.size() > 0) { int firstPartId = merged.get(0).getPartId(); for (int index = 1; index < merged.size(); index++) { if (merged.get(index).getPartId() != firstPartId) { extraDebug = false; log.fine("merged["+index+"/"+merged.size()+"] from partId "+merged.get(index).getPartId()+", first "+firstPartId); break; } } } if (extraDebug) { log.fine("Interleaved "+processed.size()+" results"); for (int pIdx = 0; pIdx < processed.size(); ++pIdx) { var p = processed.get(pIdx); log.fine("InvokerResult "+pIdx+" total hits "+p.getResult().getTotalHitCount()); var lean = p.getLeanHits(); for (int idx = 0; idx < lean.size(); ++idx) { var hit = lean.get(idx); log.fine("lean hit "+idx+" relevance "+hit.getRelevance()+" partid "+hit.getPartId()); } } for (int mIdx = 0; mIdx < merged.size(); ++mIdx) { var hit = merged.get(mIdx); log.fine("merged hit 
"+mIdx+" relevance "+hit.getRelevance()+" partid "+hit.getPartId()); } } int needed = query.getOffset() + query.getHits(); for (int index = query.getOffset(); (index < merged.size()) && (index < needed); index++) { result.getLeanHits().add(merged.get(index)); } query.setOffset(0); return result; }
class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> { private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName()); private final Set<SearchInvoker> invokers; private final SearchCluster searchCluster; private final LinkedBlockingQueue<SearchInvoker> availableForProcessing; private final Set<Integer> alreadyFailedNodes; private final boolean isContentWellBalanced; private Query query; private boolean adaptiveTimeoutCalculated = false; private long adaptiveTimeoutMin = 0; private long adaptiveTimeoutMax = 0; private long deadline = 0; private long answeredDocs = 0; private long answeredActiveDocs = 0; private long answeredSoonActiveDocs = 0; private int askedNodes = 0; private int answeredNodes = 0; private int answeredNodesParticipated = 0; private boolean timedOut = false; private boolean degradedByMatchPhase = false; public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, boolean isContentWellBalanced, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) { super(Optional.empty()); this.invokers = Collections.newSetFromMap(new IdentityHashMap<>()); this.invokers.addAll(invokers); this.searchCluster = searchCluster; this.availableForProcessing = newQueue(); this.alreadyFailedNodes = alreadyFailedNodes; this.isContentWellBalanced = isContentWellBalanced; } /** * Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search * query has an offset other than zero, it will be reset to zero and the expected hit amount * will be adjusted accordingly. 
*/ @Override protected Object sendSearchRequest(Query query, Object unusedContext) throws IOException { this.query = query; invokers.forEach(invoker -> invoker.setMonitor(this)); deadline = currentTime() + query.getTimeLeft(); int originalHits = query.getHits(); int originalOffset = query.getOffset(); int neededHits = originalHits + originalOffset; int q = neededHits; if (isContentWellBalanced) { Double topkProbabilityOverrride = query.properties().getDouble(Dispatcher.topKProbability); q = (topkProbabilityOverrride != null) ? searchCluster.estimateHitsToFetch(neededHits, invokers.size(), topkProbabilityOverrride) : searchCluster.estimateHitsToFetch(neededHits, invokers.size()); } query.setHits(q); query.setOffset(0); Object context = null; for (SearchInvoker invoker : invokers) { context = invoker.sendSearchRequest(query, context); askedNodes++; } query.setHits(originalHits); query.setOffset(originalOffset); return null; } @Override private void insertNetworkErrors(Result result) { boolean asErrors = answeredNodes == 0; if (!invokers.isEmpty()) { String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)")) .collect(Collectors.joining(", ")); if (asErrors) { result.hits().addError(ErrorMessage .createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")")); } else { query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 2); } timedOut = true; } if (alreadyFailedNodes != null) { var message = "Connection failure on nodes with distribution-keys: " + alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", ")); if (asErrors) { result.hits().addError(ErrorMessage.createBackendCommunicationError(message)); } else { query.trace(message, 2); } int failed = alreadyFailedNodes.size(); askedNodes += failed; answeredNodes += failed; } } private long nextTimeout() { DispatchConfig config = 
searchCluster.dispatchConfig(); double minimumCoverage = config.minSearchCoverage(); if (askedNodes == answeredNodes || minimumCoverage >= 100.0) { return query.getTimeLeft(); } int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0); if (answeredNodes < minimumResponses) { return query.getTimeLeft(); } long timeLeft = query.getTimeLeft(); if (!adaptiveTimeoutCalculated) { adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor()); adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor()); adaptiveTimeoutCalculated = true; } long now = currentTime(); int pendingQueries = askedNodes - answeredNodes; double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0; double slopedWait = adaptiveTimeoutMin; if (pendingQueries > 1 && missWidth > 0.0) { slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth; } long nextAdaptive = (long) slopedWait; if (now + nextAdaptive >= deadline) { return deadline - now; } deadline = now + nextAdaptive; return nextAdaptive; } private String dbg(LeanHit hit) { var buf = new StringBuilder(); buf.append("LeanHit["); if (hit.hasSortData()) buf.append("hasSortData,"); buf.append("relevance=").append(hit.getRelevance()); buf.append(",partId=").append(hit.getPartId()); buf.append(",distributionKey=").append(hit.getDistributionKey()); buf.append("]"); return buf.toString(); } private List<LeanHit> mergeResult(Result result, InvokerResult partialResult, List<LeanHit> current) { collectCoverage(partialResult.getResult().getCoverage(true)); result.mergeWith(partialResult.getResult()); List<Hit> partialNonLean = partialResult.getResult().hits().asUnorderedHits(); for(Hit hit : partialNonLean) { if (hit.isAuxiliary()) { result.hits().add(hit); } } if (current.isEmpty() ) { return partialResult.getLeanHits(); } List<LeanHit> partial = partialResult.getLeanHits(); if (partial.isEmpty()) { return current; } int needed = 
query.getOffset() + query.getHits(); List<LeanHit> merged = new ArrayList<>(needed); int indexCurrent = 0; int indexPartial = 0; while (indexCurrent < current.size() && indexPartial < partial.size() && merged.size() < needed) { LeanHit incomingHit = partial.get(indexPartial); LeanHit currentHit = current.get(indexCurrent); int cmpRes = currentHit.compareTo(incomingHit); if (cmpRes < 0) { merged.add(currentHit); indexCurrent++; } else if (cmpRes > 0) { merged.add(incomingHit); indexPartial++; } else { merged.add(currentHit); indexCurrent++; indexPartial++; } } while ((indexCurrent < current.size()) && (merged.size() < needed)) { LeanHit currentHit = current.get(indexCurrent++); merged.add(currentHit); } while ((indexPartial < partial.size()) && (merged.size() < needed)) { LeanHit incomingHit = partial.get(indexPartial++); merged.add(incomingHit); } return merged; } private void collectCoverage(Coverage source) { answeredDocs += source.getDocs(); answeredActiveDocs += source.getActive(); answeredSoonActiveDocs += source.getSoonActive(); answeredNodesParticipated += source.getNodes(); answeredNodes++; degradedByMatchPhase |= source.isDegradedByMatchPhase(); timedOut |= source.isDegradedByTimeout(); } private Coverage createCoverage() { adjustDegradedCoverage(); Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1); coverage.setNodesTried(askedNodes); coverage.setSoonActive(answeredSoonActiveDocs); int degradedReason = 0; if (timedOut) { degradedReason |= (adaptiveTimeoutCalculated ? 
DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT); } if (degradedByMatchPhase) { degradedReason |= DEGRADED_BY_MATCH_PHASE; } coverage.setDegradedReason(degradedReason); return coverage; } private void adjustDegradedCoverage() { if (askedNodes == answeredNodesParticipated) { return; } int notAnswered = askedNodes - answeredNodesParticipated; if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) { answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated); answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated); } else { if (askedNodes > answeredNodesParticipated) { int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies(); int missingNodes = notAnswered - (searchableCopies - 1); if (answeredNodesParticipated > 0) { answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated); answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated); timedOut = true; } } } } private void ejectInvoker(SearchInvoker invoker) { invokers.remove(invoker); invoker.release(); } @Override protected void release() { if (!invokers.isEmpty()) { invokers.forEach(SearchInvoker::close); invokers.clear(); } } @Override public void responseAvailable(SearchInvoker from) { if (availableForProcessing != null) { availableForProcessing.add(from); } } @Override protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) { } protected long currentTime() { return System.currentTimeMillis(); } protected LinkedBlockingQueue<SearchInvoker> newQueue() { return new LinkedBlockingQueue<>(); } Collection<SearchInvoker> invokers() { return invokers; } }
class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> { private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName()); private final Set<SearchInvoker> invokers; private final SearchCluster searchCluster; private final LinkedBlockingQueue<SearchInvoker> availableForProcessing; private final Set<Integer> alreadyFailedNodes; private final boolean isContentWellBalanced; private Query query; private boolean adaptiveTimeoutCalculated = false; private long adaptiveTimeoutMin = 0; private long adaptiveTimeoutMax = 0; private long deadline = 0; private long answeredDocs = 0; private long answeredActiveDocs = 0; private long answeredSoonActiveDocs = 0; private int askedNodes = 0; private int answeredNodes = 0; private int answeredNodesParticipated = 0; private boolean timedOut = false; private boolean degradedByMatchPhase = false; public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, boolean isContentWellBalanced, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) { super(Optional.empty()); this.invokers = Collections.newSetFromMap(new IdentityHashMap<>()); this.invokers.addAll(invokers); this.searchCluster = searchCluster; this.availableForProcessing = newQueue(); this.alreadyFailedNodes = alreadyFailedNodes; this.isContentWellBalanced = isContentWellBalanced; } /** * Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search * query has an offset other than zero, it will be reset to zero and the expected hit amount * will be adjusted accordingly. 
*/ @Override protected Object sendSearchRequest(Query query, Object unusedContext) throws IOException { this.query = query; invokers.forEach(invoker -> invoker.setMonitor(this)); deadline = currentTime() + query.getTimeLeft(); int originalHits = query.getHits(); int originalOffset = query.getOffset(); int neededHits = originalHits + originalOffset; int q = neededHits; if (isContentWellBalanced) { Double topkProbabilityOverrride = query.properties().getDouble(Dispatcher.topKProbability); q = (topkProbabilityOverrride != null) ? searchCluster.estimateHitsToFetch(neededHits, invokers.size(), topkProbabilityOverrride) : searchCluster.estimateHitsToFetch(neededHits, invokers.size()); } query.setHits(q); query.setOffset(0); Object context = null; for (SearchInvoker invoker : invokers) { context = invoker.sendSearchRequest(query, context); askedNodes++; } query.setHits(originalHits); query.setOffset(originalOffset); return null; } @Override private void insertNetworkErrors(Result result) { boolean asErrors = answeredNodes == 0; if (!invokers.isEmpty()) { String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)")) .collect(Collectors.joining(", ")); if (asErrors) { result.hits().addError(ErrorMessage .createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")")); } else { query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 2); } timedOut = true; } if (alreadyFailedNodes != null) { var message = "Connection failure on nodes with distribution-keys: " + alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", ")); if (asErrors) { result.hits().addError(ErrorMessage.createBackendCommunicationError(message)); } else { query.trace(message, 2); } int failed = alreadyFailedNodes.size(); askedNodes += failed; answeredNodes += failed; } } private long nextTimeout() { DispatchConfig config = 
searchCluster.dispatchConfig(); double minimumCoverage = config.minSearchCoverage(); if (askedNodes == answeredNodes || minimumCoverage >= 100.0) { return query.getTimeLeft(); } int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0); if (answeredNodes < minimumResponses) { return query.getTimeLeft(); } long timeLeft = query.getTimeLeft(); if (!adaptiveTimeoutCalculated) { adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor()); adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor()); adaptiveTimeoutCalculated = true; } long now = currentTime(); int pendingQueries = askedNodes - answeredNodes; double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0; double slopedWait = adaptiveTimeoutMin; if (pendingQueries > 1 && missWidth > 0.0) { slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth; } long nextAdaptive = (long) slopedWait; if (now + nextAdaptive >= deadline) { return deadline - now; } deadline = now + nextAdaptive; return nextAdaptive; } private String dbg(LeanHit hit) { var buf = new StringBuilder(); buf.append("LeanHit["); if (hit.hasSortData()) buf.append("hasSortData,"); buf.append("relevance=").append(hit.getRelevance()); buf.append(",partId=").append(hit.getPartId()); buf.append(",distributionKey=").append(hit.getDistributionKey()); buf.append("]"); return buf.toString(); } private List<LeanHit> mergeResult(Result result, InvokerResult partialResult, List<LeanHit> current) { collectCoverage(partialResult.getResult().getCoverage(true)); result.mergeWith(partialResult.getResult()); List<Hit> partialNonLean = partialResult.getResult().hits().asUnorderedHits(); for(Hit hit : partialNonLean) { if (hit.isAuxiliary()) { result.hits().add(hit); } } if (current.isEmpty() ) { return partialResult.getLeanHits(); } List<LeanHit> partial = partialResult.getLeanHits(); if (partial.isEmpty()) { return current; } int needed = 
query.getOffset() + query.getHits(); List<LeanHit> merged = new ArrayList<>(needed); int indexCurrent = 0; int indexPartial = 0; while (indexCurrent < current.size() && indexPartial < partial.size() && merged.size() < needed) { LeanHit incomingHit = partial.get(indexPartial); LeanHit currentHit = current.get(indexCurrent); int cmpRes = currentHit.compareTo(incomingHit); if (cmpRes < 0) { merged.add(currentHit); indexCurrent++; } else if (cmpRes > 0) { merged.add(incomingHit); indexPartial++; } else { merged.add(currentHit); indexCurrent++; indexPartial++; } } while ((indexCurrent < current.size()) && (merged.size() < needed)) { LeanHit currentHit = current.get(indexCurrent++); merged.add(currentHit); } while ((indexPartial < partial.size()) && (merged.size() < needed)) { LeanHit incomingHit = partial.get(indexPartial++); merged.add(incomingHit); } return merged; } private void collectCoverage(Coverage source) { answeredDocs += source.getDocs(); answeredActiveDocs += source.getActive(); answeredSoonActiveDocs += source.getSoonActive(); answeredNodesParticipated += source.getNodes(); answeredNodes++; degradedByMatchPhase |= source.isDegradedByMatchPhase(); timedOut |= source.isDegradedByTimeout(); } private Coverage createCoverage() { adjustDegradedCoverage(); Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1); coverage.setNodesTried(askedNodes); coverage.setSoonActive(answeredSoonActiveDocs); int degradedReason = 0; if (timedOut) { degradedReason |= (adaptiveTimeoutCalculated ? 
DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT); } if (degradedByMatchPhase) { degradedReason |= DEGRADED_BY_MATCH_PHASE; } coverage.setDegradedReason(degradedReason); return coverage; } private void adjustDegradedCoverage() { if (askedNodes == answeredNodesParticipated) { return; } int notAnswered = askedNodes - answeredNodesParticipated; if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) { answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated); answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated); } else { if (askedNodes > answeredNodesParticipated) { int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies(); int missingNodes = notAnswered - (searchableCopies - 1); if (answeredNodesParticipated > 0) { answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated); answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated); timedOut = true; } } } } private void ejectInvoker(SearchInvoker invoker) { invokers.remove(invoker); invoker.release(); } @Override protected void release() { if (!invokers.isEmpty()) { invokers.forEach(SearchInvoker::close); invokers.clear(); } } @Override public void responseAvailable(SearchInvoker from) { if (availableForProcessing != null) { availableForProcessing.add(from); } } @Override protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) { } protected long currentTime() { return System.currentTimeMillis(); } protected LinkedBlockingQueue<SearchInvoker> newQueue() { return new LinkedBlockingQueue<>(); } Collection<SearchInvoker> invokers() { return invokers; } }
1 million lines, that sounds a bit on the high side.
public static List<LogEntry> parseVespaLog(InputStream log, Instant from) { try (BufferedReader reader = new BufferedReader(new InputStreamReader(log, UTF_8))) { return reader.lines() .map(line -> line.split("\t")) .filter(parts -> parts.length == 7) .map(parts -> new LogEntry(0, Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS), typeOf(LogLevel.parse(parts[5])), parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' + parts[6].replaceAll("\\\\n", "\n") .replaceAll("\\\\t", "\t"))) .filter(entry -> entry.at().isAfter(from)) .limit(1_000_000) .collect(Collectors.toUnmodifiableList()); } catch (IOException e) { throw new UncheckedIOException(e); } }
.limit(1_000_000)
public static List<LogEntry> parseVespaLog(InputStream log, Instant from) { try (BufferedReader reader = new BufferedReader(new InputStreamReader(log, UTF_8))) { return reader.lines() .map(line -> line.split("\t")) .filter(parts -> parts.length == 7) .map(parts -> new LogEntry(0, Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS), typeOf(LogLevel.parse(parts[5])), parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' + parts[6].replaceAll("\\\\n", "\n") .replaceAll("\\\\t", "\t"))) .filter(entry -> entry.at().isAfter(from)) .limit(1_000_000) .collect(Collectors.toUnmodifiableList()); } catch (IOException e) { throw new UncheckedIOException(e); } }
class LogEntry { private final long id; private final Instant at; private final Type type; private final String message; public LogEntry(long id, Instant at, Type type, String message) { if (id < 0) throw new IllegalArgumentException("Id must be non-negative, but was " + id + "."); this.id = id; this.at = at; this.type = requireNonNull(type); this.message = requireNonNull(message); } public long id() { return id; } public Instant at() { return at; } public Type type() { return type; } public String message() { return message; } @Override public String toString() { return "LogEntry{" + "id=" + id + ", at=" + at.toEpochMilli() + ", type=" + type + ", message='" + message + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof LogEntry)) return false; LogEntry entry = (LogEntry) o; return id == entry.id && at.toEpochMilli() == entry.at.toEpochMilli() && type == entry.type && Objects.equals(message, entry.message); } @Override public int hashCode() { return Objects.hash(id, at, type, message); } public static Type typeOf(Level level) { return level.intValue() < Level.INFO.intValue() || level.intValue() == LogLevel.IntValEVENT ? Type.debug : level.intValue() < Level.WARNING.intValue() ? Type.info : level.intValue() < Level.SEVERE.intValue() ? Type.warning : Type.error; } /** The type of entry, used for rendering. */ public enum Type { debug, info, warning, error, html; } }
class LogEntry { private final long id; private final Instant at; private final Type type; private final String message; public LogEntry(long id, Instant at, Type type, String message) { if (id < 0) throw new IllegalArgumentException("Id must be non-negative, but was " + id + "."); this.id = id; this.at = at; this.type = requireNonNull(type); this.message = requireNonNull(message); } public long id() { return id; } public Instant at() { return at; } public Type type() { return type; } public String message() { return message; } @Override public String toString() { return "LogEntry{" + "id=" + id + ", at=" + at.toEpochMilli() + ", type=" + type + ", message='" + message + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof LogEntry)) return false; LogEntry entry = (LogEntry) o; return id == entry.id && at.toEpochMilli() == entry.at.toEpochMilli() && type == entry.type && Objects.equals(message, entry.message); } @Override public int hashCode() { return Objects.hash(id, at, type, message); } public static Type typeOf(Level level) { return level.intValue() < Level.INFO.intValue() || level.intValue() == LogLevel.IntValEVENT ? Type.debug : level.intValue() < Level.WARNING.intValue() ? Type.info : level.intValue() < Level.SEVERE.intValue() ? Type.warning : Type.error; } /** The type of entry, used for rendering. */ public enum Type { debug, info, warning, error, html; } }
Can probably set it lower, too. OOM at 40M lines, so it should be enough to fix _that_ issue, at least.
public static List<LogEntry> parseVespaLog(InputStream log, Instant from) { try (BufferedReader reader = new BufferedReader(new InputStreamReader(log, UTF_8))) { return reader.lines() .map(line -> line.split("\t")) .filter(parts -> parts.length == 7) .map(parts -> new LogEntry(0, Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS), typeOf(LogLevel.parse(parts[5])), parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' + parts[6].replaceAll("\\\\n", "\n") .replaceAll("\\\\t", "\t"))) .filter(entry -> entry.at().isAfter(from)) .limit(1_000_000) .collect(Collectors.toUnmodifiableList()); } catch (IOException e) { throw new UncheckedIOException(e); } }
.limit(1_000_000)
public static List<LogEntry> parseVespaLog(InputStream log, Instant from) { try (BufferedReader reader = new BufferedReader(new InputStreamReader(log, UTF_8))) { return reader.lines() .map(line -> line.split("\t")) .filter(parts -> parts.length == 7) .map(parts -> new LogEntry(0, Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS), typeOf(LogLevel.parse(parts[5])), parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' + parts[6].replaceAll("\\\\n", "\n") .replaceAll("\\\\t", "\t"))) .filter(entry -> entry.at().isAfter(from)) .limit(1_000_000) .collect(Collectors.toUnmodifiableList()); } catch (IOException e) { throw new UncheckedIOException(e); } }
class LogEntry { private final long id; private final Instant at; private final Type type; private final String message; public LogEntry(long id, Instant at, Type type, String message) { if (id < 0) throw new IllegalArgumentException("Id must be non-negative, but was " + id + "."); this.id = id; this.at = at; this.type = requireNonNull(type); this.message = requireNonNull(message); } public long id() { return id; } public Instant at() { return at; } public Type type() { return type; } public String message() { return message; } @Override public String toString() { return "LogEntry{" + "id=" + id + ", at=" + at.toEpochMilli() + ", type=" + type + ", message='" + message + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof LogEntry)) return false; LogEntry entry = (LogEntry) o; return id == entry.id && at.toEpochMilli() == entry.at.toEpochMilli() && type == entry.type && Objects.equals(message, entry.message); } @Override public int hashCode() { return Objects.hash(id, at, type, message); } public static Type typeOf(Level level) { return level.intValue() < Level.INFO.intValue() || level.intValue() == LogLevel.IntValEVENT ? Type.debug : level.intValue() < Level.WARNING.intValue() ? Type.info : level.intValue() < Level.SEVERE.intValue() ? Type.warning : Type.error; } /** The type of entry, used for rendering. */ public enum Type { debug, info, warning, error, html; } }
class LogEntry { private final long id; private final Instant at; private final Type type; private final String message; public LogEntry(long id, Instant at, Type type, String message) { if (id < 0) throw new IllegalArgumentException("Id must be non-negative, but was " + id + "."); this.id = id; this.at = at; this.type = requireNonNull(type); this.message = requireNonNull(message); } public long id() { return id; } public Instant at() { return at; } public Type type() { return type; } public String message() { return message; } @Override public String toString() { return "LogEntry{" + "id=" + id + ", at=" + at.toEpochMilli() + ", type=" + type + ", message='" + message + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof LogEntry)) return false; LogEntry entry = (LogEntry) o; return id == entry.id && at.toEpochMilli() == entry.at.toEpochMilli() && type == entry.type && Objects.equals(message, entry.message); } @Override public int hashCode() { return Objects.hash(id, at, type, message); } public static Type typeOf(Level level) { return level.intValue() < Level.INFO.intValue() || level.intValue() == LogLevel.IntValEVENT ? Type.debug : level.intValue() < Level.WARNING.intValue() ? Type.info : level.intValue() < Level.SEVERE.intValue() ? Type.warning : Type.error; } /** The type of entry, used for rendering. */ public enum Type { debug, info, warning, error, html; } }
I wish there was a `with(FetchVector.Dimension, Optional<String>)` method also so one could just do ``` .with(FetchVector.Dimension.HOSTNAME, context.node().hostname()) .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm)) .with(...) ```
private ContainerResources getContainerResources(NodeAgentContext context) { final double cpuCap; if (noCpuCap(context.zone())) { cpuCap = 0.0; } else { DoubleFlag containerCpuCapFlag = containerCpuCap.with(FetchVector.Dimension.HOSTNAME, context.node().hostname());; if (context.node().owner().isPresent()) { containerCpuCapFlag = containerCpuCapFlag.with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().get().serializedForm()); } if (context.node().membership().isPresent()) { containerCpuCapFlag = containerCpuCapFlag .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().get().type().value()) .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().get().clusterId()); } cpuCap = context.vcpuOnThisHost() * containerCpuCapFlag.value(); } return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb()); }
.with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().get().type().value())
private ContainerResources getContainerResources(NodeAgentContext context) { double cpuCap = noCpuCap(context.zone()) ? 0 : context.vcpuOnThisHost() * containerCpuCap .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm)) .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().map(NodeMembership::clusterId)) .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().map(membership -> membership.type().value())) .with(FetchVector.Dimension.HOSTNAME, context.node().hostname()) .value(); return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb()); }
/**
 * Converges the container on one node towards the wanted state described by the node repository:
 * a background thread repeatedly fetches a {@link NodeAgentContext} from the supplier and calls
 * {@link #converge}, which starts/updates/removes the container, manages service start/stop and
 * orchestrator suspend/resume, and publishes changed attributes back to the node repository.
 */
class NodeAgentImpl implements NodeAgent {

    // Grace period before CPU caps are enforced after container start; just under 90 seconds.
    private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1));

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    private final NodeAgentContextSupplier contextSupplier;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final ContainerOperations containerOperations;
    private final RegistryCredentialsProvider registryCredentialsProvider;
    private final StorageMaintainer storageMaintainer;
    private final List<CredentialsMaintainer> credentialsMaintainers;
    private final Optional<AclMaintainer> aclMaintainer;
    private final Optional<HealthChecker> healthChecker;
    private final Clock clock;
    private final Duration warmUpDuration;
    private final DoubleFlag containerCpuCap;

    // Mutable agent state, maintained across ticks of the converge loop.
    private Thread loopThread;
    private ContainerState containerState = UNKNOWN;
    private NodeSpec lastNode;
    private final AtomicBoolean terminated = new AtomicBoolean(false);
    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;
    private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty();
    private boolean suspendedInOrchestrator = false;
    private int numberOfUnhandledException = 0;
    private long currentRebootGeneration = 0;
    private Optional<Long> currentRestartGeneration = Optional.empty();

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    /** Creates an agent with the default warm-up duration. */
    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider,
                         StorageMaintainer storageMaintainer, FlagSource flagSource,
                         List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer,
                         Optional<HealthChecker> healthChecker, Clock clock) {
        this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider,
             storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock,
             DEFAULT_WARM_UP_DURATION);
    }

    /** Creates an agent with an explicit warm-up duration (used by tests and the delegating constructor). */
    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider,
                         StorageMaintainer storageMaintainer, FlagSource flagSource,
                         List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer,
                         Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration) {
        this.contextSupplier = contextSupplier;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.containerOperations = containerOperations;
        this.registryCredentialsProvider = registryCredentialsProvider;
        this.storageMaintainer = storageMaintainer;
        this.credentialsMaintainers = credentialsMaintainers;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        this.clock = clock;
        this.warmUpDuration = warmUpDuration;
        this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource);
    }

    /**
     * Starts the converge loop in a dedicated thread named after the node's hostname.
     * May only be called once.
     */
    @Override
    public void start(NodeAgentContext initialContext) {
        if (loopThread != null)
            throw new IllegalStateException("Can not re-start a node agent.");

        loopThread = new Thread(() -> {
            while (!terminated.get()) {
                try {
                    NodeAgentContext context = contextSupplier.nextContext();
                    converge(context);
                } catch (InterruptedException ignored) {
                    // terminated flag is checked at the top of the loop; nothing else to do here
                }
            }
        });
        loopThread.setName("tick-" + initialContext.hostname());
        loopThread.start();
    }

    /**
     * Stops the converge loop and blocks until the loop thread has exited.
     * May only be called once.
     */
    @Override
    public void stopForRemoval(NodeAgentContext context) {
        if (!terminated.compareAndSet(false, true))
            throw new IllegalStateException("Can not re-stop a node agent.");

        contextSupplier.interrupt();

        do {
            try {
                loopThread.join();
            } catch (InterruptedException ignored) { }
        } while (loopThread.isAlive());

        context.log(logger, "Stopped");
    }

    /** Starts services in the container unless they are already started. */
    void startServicesIfNeeded(NodeAgentContext context) {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            containerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /** Runs the node's resume command unless it has already been run since the last suspend/start. */
    void resumeNodeIfNeeded(NodeAgentContext context) {
        if (!hasResumedNode) {
            context.log(logger, Level.FINE, "Starting optional node program resume command");
            containerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Computes which node attributes (restart/reboot generation, docker image, vespa version)
     * have changed locally, and publishes them to the node repository if any differ.
     */
    private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();

        if (context.node().wantedRestartGeneration().isPresent() &&
            !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) {
            currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration());
            newNodeAttributes.withRestartGeneration(currentRestartGeneration);
        }

        if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) {
            currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration());
            newNodeAttributes.withRebootGeneration(currentRebootGeneration);
        }

        // Only report the wanted image as current while the container state is UNKNOWN (i.e. possibly running).
        Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) {
            DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY);
            DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY);

            currentNodeAttributes.withDockerImage(currentImage);
            currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion());
            newNodeAttributes.withDockerImage(newImage);
            newNodeAttributes.withVespaVersion(newImage.tagAsVersion());
        }

        publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
    }

    /** Writes the new attributes to the node repository, but only if they differ from the current ones. */
    private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                        currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /**
     * Creates and starts the container, records the wanted reboot/restart generations as current,
     * and returns the started container. CPU is left uncapped while a warm-up period applies.
     *
     * @throws ConvergenceException if the container cannot be found right after starting it
     */
    private Container startContainer(NodeAgentContext context) {
        ContainerData containerData = createContainerData(context);
        ContainerResources wantedResources = warmUpDuration(context).isNegative() ?
                getContainerResources(context) : getContainerResources(context).withUnlimitedCpus();
        containerOperations.createContainer(context, containerData, wantedResources);
        containerOperations.startContainer(context);

        currentRebootGeneration = context.node().wantedRebootGeneration();
        currentRestartGeneration = context.node().wantedRestartGeneration();
        hasStartedServices = true; // Services are started at container startup
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
        return containerOperations.getContainer(context).orElseThrow(() ->
                new ConvergenceException("Did not find container that was just started"));
    }

    /**
     * Removes the container if any removal reason applies (returning empty), otherwise restarts
     * services inside it if the restart generation has been bumped, and returns it unchanged.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeAgentContext context, Optional<Container> existingContainer) {
        if (existingContainer.isPresent()) {
            List<String> reasons = shouldRemoveContainer(context, existingContainer.get());
            if (!reasons.isEmpty()) {
                removeContainer(context, existingContainer.get(), reasons, false);
                return Optional.empty();
            }

            shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> {
                context.log(logger, "Will restart services: " + restartReason);
                orchestratorSuspendNode(context);
                containerOperations.restartVespa(context);
                currentRestartGeneration = context.node().wantedRestartGeneration();
            });
        }

        return existingContainer;
    }

    /**
     * Returns a reason to restart services if the node is active, the container is running,
     * and the wanted restart generation is ahead of the current one; empty otherwise.
     */
    private Optional<String> shouldRestartServices(NodeAgentContext context, Container existingContainer) {
        NodeSpec node = context.node();
        if (!existingContainer.state.isRunning() || node.state() != NodeState.active) return Optional.empty();

        // Restart generation is only optional because it does not exist for unallocated nodes
        if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                               + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Stops services when the node has no owner but services are still marked started. */
    private void stopServicesIfNeeded(NodeAgentContext context) {
        if (hasStartedServices && context.node().owner().isEmpty())
            stopServices(context);
    }

    /** Stops services in the container, resetting the started/resumed/health-check state. */
    private void stopServices(NodeAgentContext context) {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            firstSuccessfulHealthCheckInstant = Optional.empty();
            containerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /** Removes the container as part of suspending the whole host (orchestrator already consulted). */
    @Override
    public void stopForHostSuspension(NodeAgentContext context) {
        getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true));
    }

    /** Suspends services on the node; failures other than a missing container are logged and swallowed. */
    public void suspend(NodeAgentContext context) {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            containerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // Failure to suspend only means that we might be able to converge to the wanted state faster,
            // so we don't propagate the failure here.
            context.log(logger, Level.WARNING, "Failed trying to suspend container", e);
        }
    }

    /** Returns the list of reasons (possibly empty) why the existing container should be removed. */
    private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) {
        final NodeState nodeState = context.node().state();
        List<String> reasons = new ArrayList<>();
        if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned)
            reasons.add("Node in state " + nodeState + ", container should no longer be running");

        if (context.node().wantedDockerImage().isPresent() &&
            !context.node().wantedDockerImage().get().equals(existingContainer.image)) {
            reasons.add("The node is supposed to run a new Docker image: "
                        + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString());
        }

        if (!existingContainer.state.isRunning())
            reasons.add("Container no longer running");

        if (currentRebootGeneration < context.node().wantedRebootGeneration()) {
            reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                                      currentRebootGeneration, context.node().wantedRebootGeneration()));
        }

        // Memory changes require a remove+recreate; CPU changes are applied in place (see updateContainerIfNeeded).
        ContainerResources wantedContainerResources = getContainerResources(context);
        if (!wantedContainerResources.equalsMemory(existingContainer.resources)) {
            reasons.add("Container should be running with different memory allocation, wanted: " +
                        wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory());
        }

        if (containerState == STARTING)
            reasons.add("Container failed to start");

        return reasons;
    }

    /**
     * Suspends (unless already suspended / node is dirty), stops services, handles core dumps,
     * and removes the container, setting the container state to ABSENT.
     */
    private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) {
        context.log(logger, "Will remove container: " + String.join(", ", reasons));

        if (existingContainer.state.isRunning()) {
            if (!alreadySuspended) {
                orchestratorSuspendNode(context);
            }

            try {
                if (context.node().state() != NodeState.dirty) {
                    suspend(context);
                }
                stopServices(context);
            } catch (Exception e) {
                // Stopping before removal is best effort; removal proceeds regardless.
                context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e);
            }
        }

        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        containerOperations.removeContainer(context, existingContainer);
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
    }

    /**
     * Applies an in-place CPU allocation update to the running container when wanted CPU differs,
     * but only after the warm-up period (if health-checked) has passed. Memory is kept as-is.
     *
     * @throws ConvergenceException if the container cannot be found right after updating it
     */
    private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
        ContainerResources wantedContainerResources = getContainerResources(context);

        // While still warming up (no successful health check older than the warm-up duration),
        // leave the container's resources untouched.
        if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant
                .map(clock.instant().minus(warmUpDuration(context))::isBefore)
                .orElse(true))
            return existingContainer;

        if (wantedContainerResources.equalsCpu(existingContainer.resources)) return existingContainer;
        context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
                    wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu());

        containerOperations.updateContainer(context, existingContainer.id(),
                                            wantedContainerResources.withMemoryBytes(existingContainer.resources.memoryBytes()));
        return containerOperations.getContainer(context).orElseThrow(() ->
                new ConvergenceException("Did not find container that was just updated"));
    }

    /** CPU capping is disabled in dev environments and in CD systems. */
    private boolean noCpuCap(ZoneApi zone) {
        return zone.getEnvironment() == Environment.dev || zone.getSystemName().isCd();
    }

    /**
     * Kicks off an async pull of the wanted image if the container is not already running it.
     * Returns true while a pull is (or has just been made) in progress.
     */
    private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) {
        NodeSpec node = context.node();
        if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false;

        RegistryCredentials credentials = registryCredentialsProvider.get();
        return node.wantedDockerImage()
                   .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials))
                   .orElse(false);
    }

    /**
     * One tick of the converge loop: delegates to {@link #doConverge} and translates known
     * exception types into log lines / state resets instead of letting them kill the loop thread.
     */
    public void converge(NodeAgentContext context) {
        try {
            doConverge(context);
            context.log(logger, Level.INFO, "Converged");
        } catch (ConvergenceException e) {
            context.log(logger, e.getMessage());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
            context.log(logger, Level.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
        } catch (DockerException e) {
            numberOfUnhandledException++;
            context.log(logger, Level.SEVERE, "Caught a DockerException", e);
        } catch (Throwable e) {
            numberOfUnhandledException++;
            context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e);
        }
    }

    /** Converges the local container towards the node's wanted state, dispatching on the node state. */
    void doConverge(NodeAgentContext context) {
        NodeSpec node = context.node();
        Optional<Container> container = getContainer(context);

        // On a changed node spec, fast-forward locally tracked reboot/restart generations.
        if (!node.equals(lastNode)) {
            logChangesToNodeSpec(context, lastNode, node);

            // Current reboot generation uninitialized or incremented from outside to cancel reboot
            if (currentRebootGeneration < node.currentRebootGeneration())
                currentRebootGeneration = node.currentRebootGeneration();

            // Either I am firing up after restart, or the node repository has changed it from outside
            if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() ||
                currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false))
                currentRestartGeneration = node.currentRestartGeneration();

            lastNode = node;
        }

        switch (node.state()) {
            case ready:
            case reserved:
            case failed:
            case inactive:
            case parked:
                removeContainerIfNeededUpdateContainerState(context, container);
                updateNodeRepoWithCurrentAttributes(context);
                stopServicesIfNeeded(context);
                break;
            case active:
                storageMaintainer.syncLogs(context, true);
                storageMaintainer.cleanDiskIfFull(context);
                storageMaintainer.handleCoreDumpsForContainer(context, container);

                if (downloadImageIfNeeded(context, container)) {
                    context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(context, container);
                credentialsMaintainers.forEach(maintainer -> maintainer.converge(context));
                if (container.isEmpty()) {
                    containerState = STARTING;
                    container = Optional.of(startContainer(context));
                    containerState = UNKNOWN;
                } else {
                    container = Optional.of(updateContainerIfNeeded(context, container.get()));
                }

                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
                startServicesIfNeeded(context);
                resumeNodeIfNeeded(context);
                if (healthChecker.isPresent()) {
                    healthChecker.get().verifyHealth(context);
                    if (firstSuccessfulHealthCheckInstant.isEmpty())
                        firstSuccessfulHealthCheckInstant = Optional.of(clock.instant());

                    Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context)));
                    // CPU is still uncapped (warm-up): refuse to resume until the cap has been applied.
                    if (!container.get().resources.equalsCpu(getContainerResources(context)))
                        throw new ConvergenceException("Refusing to resume until warm up period ends (" +
                                                       (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")");
                }
                updateNodeRepoWithCurrentAttributes(context);
                if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) {
                    context.log(logger, "Call resume against Orchestrator");
                    orchestrator.resume(context.hostname().value());
                    suspendedInOrchestrator = false;
                }
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(context, container);
                context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready");
                credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context));
                storageMaintainer.syncLogs(context, false);
                storageMaintainer.archiveNodeStorage(context);
                updateNodeRepoWithCurrentAttributes(context);
                nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
                break;
            default:
                throw new ConvergenceException("UNKNOWN STATE " + node.state().name());
        }
    }

    /** Logs a diff of the fields (currently only state) that changed between the old and new node spec. */
    private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
        StringBuilder builder = new StringBuilder();
        appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state);
        if (builder.length() > 0) {
            context.log(logger, Level.INFO, "Changes to node: " + builder.toString());
        }
    }

    /** Renders a value for diff logging; null becomes "[absent]". */
    private static <T> String fieldDescription(T value) {
        return value == null ? "[absent]" : value.toString();
    }

    /** Appends "name old -> new" to builder when the extracted field differs between the two specs. */
    private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
        T oldValue = oldNode == null ? null : getter.apply(oldNode);
        T newValue = getter.apply(newNode);
        if (!Objects.equals(oldValue, newValue)) {
            if (builder.length() > 0) {
                builder.append(", ");
            }
            builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
        }
    }

    /** Asks the container runtime for the container, caching a definite absence in containerState. */
    private Optional<Container> getContainer(NodeAgentContext context) {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = containerOperations.getContainer(context);
        if (container.isEmpty()) containerState = ABSENT;
        return container;
    }

    /** Returns the unhandled-exception count since the last call and resets it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Asks the orchestrator for permission to suspend an active node. On rejection, tries to
     * re-converge ACLs (suppressing any failure onto the rethrown exception) before rethrowing.
     */
    private void orchestratorSuspendNode(NodeAgentContext context) {
        if (context.node().state() != NodeState.active) return;

        context.log(logger, "Ask Orchestrator for permission to suspend node");
        try {
            orchestrator.suspend(context.hostname().value());
            suspendedInOrchestrator = true;
        } catch (OrchestratorException e) {
            // Ensure the ACLs are up to date: The reason we're unable to suspend may be because some other
            // node is suspended (e.g. running a test), and the ACL update was done a while ago.
            try {
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            } catch (RuntimeException suppressed) {
                logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed);
                e.addSuppressed(suppressed);
            }

            throw e;
        }
    }

    /**
     * Hook for subclasses to supply files/directories/symlinks to create inside the container;
     * the default implementation supports none of these operations.
     */
    protected ContainerData createContainerData(NodeAgentContext context) {
        return new ContainerData() {
            @Override
            public void addFile(Path pathInContainer, String data) {
                throw new UnsupportedOperationException("addFile not implemented");
            }

            @Override
            public void addDirectory(Path pathInContainer) {
                throw new UnsupportedOperationException("addDirectory not implemented");
            }

            @Override
            public void createSymlink(Path symlink, Path target) {
                throw new UnsupportedOperationException("createSymlink not implemented");
            }
        };
    }

    /** Exposes the credentials maintainers to subclasses. */
    protected List<CredentialsMaintainer> credentialsMaintainers() {
        return credentialsMaintainers;
    }

    /**
     * Returns the effective warm-up duration for this node: negative (i.e. disabled) in CD systems,
     * test environments, for non-tenant nodes, and for memberships that are neither container nor admin.
     */
    private Duration warmUpDuration(NodeAgentContext context) {
        ZoneApi zone = context.zone();
        Optional<NodeMembership> membership = context.node().membership();
        return zone.getSystemName().isCd()
               || zone.getEnvironment().isTest()
               || context.nodeType() != NodeType.tenant
               || membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false)
                ? Duration.ofSeconds(-1)
                : warmUpDuration;
    }

}
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final ContainerOperations containerOperations; private final RegistryCredentialsProvider registryCredentialsProvider; private final StorageMaintainer storageMaintainer; private final List<CredentialsMaintainer> credentialsMaintainers; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final Clock clock; private final Duration warmUpDuration; private final DoubleFlag containerCpuCap; private Thread loopThread; private ContainerState containerState = UNKNOWN; private NodeSpec lastNode; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty(); private boolean suspendedInOrchestrator = false; private int numberOfUnhandledException = 0; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock) { this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider, storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock, DEFAULT_WARM_UP_DURATION); } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.containerOperations = containerOperations; this.registryCredentialsProvider = registryCredentialsProvider; this.storageMaintainer = storageMaintainer; this.credentialsMaintainers = credentialsMaintainers; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.clock = clock; this.warmUpDuration = warmUpDuration; this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource); } @Override public void start(NodeAgentContext initialContext) { if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent."); loopThread = new Thread(() -> { while (!terminated.get()) { try { NodeAgentContext context = contextSupplier.nextContext(); converge(context); } catch 
(InterruptedException ignored) { } } }); loopThread.setName("tick-" + initialContext.hostname()); loopThread.start(); } @Override public void stopForRemoval(NodeAgentContext context) { if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent."); contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); context.log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Starting services"); containerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, Level.FINE, "Starting optional node program resume command"); containerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (context.node().wantedRestartGeneration().isPresent() && !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); } if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) { currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); } Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) { DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY); 
currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion()); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(newImage.tagAsVersion()); } publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private Container startContainer(NodeAgentContext context) { ContainerData containerData = createContainerData(context); ContainerResources wantedResources = warmUpDuration(context).isNegative() ? getContainerResources(context) : getContainerResources(context).withUnlimitedCpus(); containerOperations.createContainer(context, containerData, wantedResources); containerOperations.startContainer(context); currentRebootGeneration = context.node().wantedRebootGeneration(); currentRestartGeneration = context.node().wantedRestartGeneration(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just started")); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { List<String> reasons = shouldRemoveContainer(context, existingContainer.get()); if (!reasons.isEmpty()) { removeContainer(context, existingContainer.get(), reasons, false); return Optional.empty(); } shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> { 
context.log(logger, "Will restart services: " + restartReason); orchestratorSuspendNode(context); containerOperations.restartVespa(context); currentRestartGeneration = context.node().wantedRestartGeneration(); }); } return existingContainer; } private Optional<String> shouldRestartServices( NodeAgentContext context, Container existingContainer) { NodeSpec node = context.node(); if (!existingContainer.state.isRunning() || node.state() != NodeState.active) return Optional.empty(); if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get()); } return Optional.empty(); } private void stopServicesIfNeeded(NodeAgentContext context) { if (hasStartedServices && context.node().owner().isEmpty()) stopServices(context); } private void stopServices(NodeAgentContext context) { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; firstSuccessfulHealthCheckInstant = Optional.empty(); containerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void stopForHostSuspension(NodeAgentContext context) { getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true)); } public void suspend(NodeAgentContext context) { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; containerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, Level.WARNING, "Failed trying to suspend container", e); } } private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().state(); 
List<String> reasons = new ArrayList<>(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) reasons.add("Node in state " + nodeState + ", container should no longer be running"); if (context.node().wantedDockerImage().isPresent() && !context.node().wantedDockerImage().get().equals(existingContainer.image)) { reasons.add("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) reasons.add("Container no longer running"); if (currentRebootGeneration < context.node().wantedRebootGeneration()) { reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d", currentRebootGeneration, context.node().wantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources)) { reasons.add("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory()); } if (containerState == STARTING) reasons.add("Container failed to start"); return reasons; } private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) { context.log(logger, "Will remove container: " + String.join(", ", reasons)); if (existingContainer.state.isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().state() != NodeState.dirty) { suspend(context); } stopServices(context); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer)); containerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new 
containerState is " + containerState); } private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant .map(clock.instant().minus(warmUpDuration(context))::isBefore) .orElse(true)) return existingContainer; if (wantedContainerResources.equalsCpu(existingContainer.resources)) return existingContainer; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu()); containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources.memoryBytes())); return containerOperations.getContainer(context).orElseThrow(() -> new ConvergenceException("Did not find container that was just updated")); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev || zone.getSystemName().isCd(); } private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) { NodeSpec node = context.node(); if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false; RegistryCredentials credentials = registryCredentialsProvider.get(); return node.wantedDockerImage() .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials)) .orElse(false); } public void converge(NodeAgentContext context) { try { doConverge(context); context.log(logger, Level.INFO, "Converged"); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, Level.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Caught a 
DockerException", e); } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e); } } void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() || currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.currentRestartGeneration(); lastNode = node; } switch (node.state()) { case ready: case reserved: case failed: case inactive: case parked: removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context); stopServicesIfNeeded(context); break; case active: storageMaintainer.syncLogs(context, true); storageMaintainer.cleanDiskIfFull(context); storageMaintainer.handleCoreDumpsForContainer(context, container); if (downloadImageIfNeeded(context, container)) { context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainers.forEach(maintainer -> maintainer.converge(context)); if (container.isEmpty()) { containerState = STARTING; container = Optional.of(startContainer(context)); containerState = UNKNOWN; } else { container = Optional.of(updateContainerIfNeeded(context, container.get())); } aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); startServicesIfNeeded(context); resumeNodeIfNeeded(context); if (healthChecker.isPresent()) { healthChecker.get().verifyHealth(context); if (firstSuccessfulHealthCheckInstant.isEmpty()) firstSuccessfulHealthCheckInstant = 
Optional.of(clock.instant()); Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context))); if (!container.get().resources.equalsCpu(getContainerResources(context))) throw new ConvergenceException("Refusing to resume until warm up period ends (" + (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")"); } updateNodeRepoWithCurrentAttributes(context); if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) { context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); suspendedInOrchestrator = false; } break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready"); credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.syncLogs(context, false); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw new ConvergenceException("UNKNOWN STATE " + node.state().name()); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state); if (builder.length() > 0) { context.log(logger, Level.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? 
null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = containerOperations.getContainer(context); if (container.isEmpty()) containerState = ABSENT; return container; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().state() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); suspendedInOrchestrator = true; } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected ContainerData createContainerData(NodeAgentContext context) { return new ContainerData() { @Override public void addFile(Path pathInContainer, String data) { throw new UnsupportedOperationException("addFile not implemented"); } @Override public void addDirectory(Path pathInContainer) { throw new UnsupportedOperationException("addDirectory not implemented"); } @Override public void createSymlink(Path symlink, Path target) { throw new UnsupportedOperationException("createSymlink not implemented"); } }; } protected List<CredentialsMaintainer> credentialsMaintainers() { return credentialsMaintainers; } private Duration warmUpDuration(NodeAgentContext context) { ZoneApi zone = context.zone(); Optional<NodeMembership> membership = 
context.node().membership(); return zone.getSystemName().isCd() || zone.getEnvironment().isTest() || context.nodeType() != NodeType.tenant || membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false) ? Duration.ofSeconds(-1) : warmUpDuration; } }
Good suggestion, fixed.
/**
 * Computes the container resources this node's container should run with.
 *
 * CPU is capped by the {@code containerCpuCap} feature flag, resolved against the most
 * specific dimensions available for this node (hostname, owning application, cluster id,
 * cluster type). In zones without CPU capping (see {@link #noCpuCap}) the cap is 0, which
 * is passed straight to {@code ContainerResources.from} — presumably meaning "uncapped";
 * confirm against ContainerResources.
 *
 * @param context the node agent context describing the node and its host
 * @return the wanted container resources (cpu cap, vcpus on this host, memory)
 */
private ContainerResources getContainerResources(NodeAgentContext context) {
    double cpuCap = noCpuCap(context.zone()) ?
            0 :
            // The Optional-accepting .with overloads skip dimensions that are absent,
            // replacing the previous isPresent()/get() branching (which also carried a
            // stray ';;' after the first assignment).
            context.vcpuOnThisHost() * containerCpuCap
                    .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm))
                    .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().map(NodeMembership::clusterId))
                    .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().map(membership -> membership.type().value()))
                    .with(FetchVector.Dimension.HOSTNAME, context.node().hostname())
                    .value();
    return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb());
}
.with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().get().type().value())
/**
 * Computes the container resources this node's container should run with.
 *
 * The CPU cap comes from the {@code containerCpuCap} flag, bound to the node's hostname,
 * owning application, cluster id and cluster type where those are present; absent
 * Optional dimensions are simply not applied. In zones where {@link #noCpuCap} holds,
 * the cap is 0 (taken here to mean no cap — verify against ContainerResources).
 *
 * @param context the node agent context describing the node and its host
 * @return the wanted container resources (cpu cap, vcpus on this host, memory)
 */
private ContainerResources getContainerResources(NodeAgentContext context) {
    double cpuCap;
    if (noCpuCap(context.zone())) {
        cpuCap = 0;
    } else {
        // Resolve the flag with every dimension we can supply for this node.
        DoubleFlag boundCpuCapFlag = containerCpuCap
                .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm))
                .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().map(NodeMembership::clusterId))
                .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().map(membership -> membership.type().value()))
                .with(FetchVector.Dimension.HOSTNAME, context.node().hostname());
        cpuCap = context.vcpuOnThisHost() * boundCpuCapFlag.value();
    }
    return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb());
}
/**
 * Drives a single node's container towards the wanted state held in the node repository.
 * {@link #start} spawns a dedicated "tick" thread (named per hostname) that repeatedly pulls a
 * fresh context from the supplier and calls {@link #converge}.
 *
 * NOTE(review): mutable fields appear to be confined to the tick thread, with only the
 * start/stop handshake going through the {@code terminated} AtomicBoolean — confirm before
 * touching from other threads.
 */
class NodeAgentImpl implements NodeAgent {

    // Warm-up is just under 90 s; presumably shaved by 1 s so a tick at exactly 90 s passes — confirm.
    private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1));

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // Collaborators, all injected via the constructor.
    private final NodeAgentContextSupplier contextSupplier;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final ContainerOperations containerOperations;
    private final RegistryCredentialsProvider registryCredentialsProvider;
    private final StorageMaintainer storageMaintainer;
    private final List<CredentialsMaintainer> credentialsMaintainers;
    private final Optional<AclMaintainer> aclMaintainer;
    private final Optional<HealthChecker> healthChecker;
    private final Clock clock;
    private final Duration warmUpDuration;
    private final DoubleFlag containerCpuCap;

    private Thread loopThread;

    // Mutable convergence state, updated by the tick loop.
    private ContainerState containerState = UNKNOWN;
    private NodeSpec lastNode;                                   // last node spec seen, for change logging
    private final AtomicBoolean terminated = new AtomicBoolean(false);
    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;
    private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty();
    private boolean suspendedInOrchestrator = false;             // true while we hold an Orchestrator suspension
    private int numberOfUnhandledException = 0;
    private long currentRebootGeneration = 0;
    private Optional<Long> currentRestartGeneration = Optional.empty();

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    /** Convenience constructor using {@link #DEFAULT_WARM_UP_DURATION}. */
    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider,
                         StorageMaintainer storageMaintainer, FlagSource flagSource,
                         List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker,
                         Clock clock) {
        this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider,
             storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock,
             DEFAULT_WARM_UP_DURATION);
    }

    /** Full constructor; {@code warmUpDuration} is overridable mainly for tests, presumably — confirm. */
    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider,
                         StorageMaintainer storageMaintainer, FlagSource flagSource,
                         List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker,
                         Clock clock, Duration warmUpDuration) {
        this.contextSupplier = contextSupplier;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.containerOperations = containerOperations;
        this.registryCredentialsProvider = registryCredentialsProvider;
        this.storageMaintainer = storageMaintainer;
        this.credentialsMaintainers = credentialsMaintainers;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        this.clock = clock;
        this.warmUpDuration = warmUpDuration;
        this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource);
    }

    /**
     * Starts the tick loop on a dedicated thread. May only be called once.
     * InterruptedException from the context supplier is swallowed so the loop re-checks
     * {@code terminated} and exits cleanly on stop.
     */
    @Override
    public void start(NodeAgentContext initialContext) {
        if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent.");
        loopThread = new Thread(() -> {
            while (!terminated.get()) {
                try {
                    NodeAgentContext context = contextSupplier.nextContext();
                    converge(context);
                } catch (InterruptedException ignored) { }
            }
        });
        loopThread.setName("tick-" + initialContext.hostname());
        loopThread.start();
    }

    /**
     * Stops the tick loop and blocks until the tick thread has died. May only be called once.
     * join() is retried in a loop because it can return spuriously via InterruptedException.
     */
    @Override
    public void stopForRemoval(NodeAgentContext context) {
        if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent.");
        contextSupplier.interrupt();
        do {
            try {
                loopThread.join();
            } catch (InterruptedException ignored) { }
        } while (loopThread.isAlive());
        context.log(logger, "Stopped");
    }

    /** Starts services in the container unless this agent believes they are already started. */
    void startServicesIfNeeded(NodeAgentContext context) {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            containerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /** Runs the optional node resume program once after a (re)start or suspension. */
    void resumeNodeIfNeeded(NodeAgentContext context) {
        if (!hasResumedNode) {
            context.log(logger, Level.FINE, "Starting optional node program resume command");
            containerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Publishes this agent's view of restart/reboot generation and docker image to the node
     * repository when it differs from what the node repository currently has.
     */
    private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();
        if (context.node().wantedRestartGeneration().isPresent() &&
                !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) {
            currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration());
            newNodeAttributes.withRestartGeneration(currentRestartGeneration);
        }
        if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) {
            currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration());
            newNodeAttributes.withRebootGeneration(currentRebootGeneration);
        }
        // Only report the wanted image as current when the container state is not definitely absent/starting.
        Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) {
            DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY);
            DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY);
            currentNodeAttributes.withDockerImage(currentImage);
            currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion());
            newNodeAttributes.withDockerImage(newImage);
            newNodeAttributes.withVespaVersion(newImage.tagAsVersion());
        }
        publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
    }

    /** Writes the new attributes to the node repository only if they differ from the current ones. */
    private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                        currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /**
     * Creates and starts the container, records the wanted reboot/restart generations as current,
     * and returns the freshly created container.
     * While the warm-up period applies, the container is started with unlimited CPU; the cap is
     * applied later by {@link #updateContainerIfNeeded}.
     *
     * @throws ConvergenceException if the container cannot be found right after starting
     */
    private Container startContainer(NodeAgentContext context) {
        ContainerData containerData = createContainerData(context);
        ContainerResources wantedResources = warmUpDuration(context).isNegative() ?
                getContainerResources(context) : getContainerResources(context).withUnlimitedCpus();
        containerOperations.createContainer(context, containerData, wantedResources);
        containerOperations.startContainer(context);
        currentRebootGeneration = context.node().wantedRebootGeneration();
        currentRestartGeneration = context.node().wantedRestartGeneration();
        hasStartedServices = true;
        hasResumedNode = false;
        // NOTE(review): this logs before containerState is advanced by the caller, so it still
        // reads STARTING here — confirm the message is intentional.
        context.log(logger, "Container successfully started, new containerState is " + containerState);
        return containerOperations.getContainer(context).orElseThrow(() ->
                new ConvergenceException("Did not find container that was just started"));
    }

    /**
     * Removes the container when {@link #shouldRemoveContainer} says so, otherwise restarts
     * services inside it when {@link #shouldRestartServices} says so.
     *
     * @return empty if the container was removed, otherwise the container passed in
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeAgentContext context, Optional<Container> existingContainer) {
        if (existingContainer.isPresent()) {
            List<String> reasons = shouldRemoveContainer(context, existingContainer.get());
            if (!reasons.isEmpty()) {
                removeContainer(context, existingContainer.get(), reasons, false);
                return Optional.empty();
            }
            shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> {
                context.log(logger, "Will restart services: " + restartReason);
                orchestratorSuspendNode(context);
                containerOperations.restartVespa(context);
                currentRestartGeneration = context.node().wantedRestartGeneration();
            });
        }
        return existingContainer;
    }

    /**
     * Returns the reason services must be restarted (restart generation bumped), or empty.
     * NOTE(review): assumes both current and wanted restart generations are present when the
     * container is running and the node is active — get() would throw otherwise; confirm invariant.
     */
    private Optional<String> shouldRestartServices(NodeAgentContext context, Container existingContainer) {
        NodeSpec node = context.node();
        if (!existingContainer.state.isRunning() || node.state() != NodeState.active) return Optional.empty();
        if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Stops services when the node has been deallocated (no owner) but services are still up. */
    private void stopServicesIfNeeded(NodeAgentContext context) {
        if (hasStartedServices && context.node().owner().isEmpty())
            stopServices(context);
    }

    /** Stops services in the container and resets the service/resume/health bookkeeping. */
    private void stopServices(NodeAgentContext context) {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            firstSuccessfulHealthCheckInstant = Optional.empty();
            containerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            // Container disappeared underneath us; remember that.
            containerState = ABSENT;
        }
    }

    /** Removes the container as part of a host suspension; Orchestrator permission is assumed granted. */
    @Override
    public void stopForHostSuspension(NodeAgentContext context) {
        getContainer(context).ifPresent(container ->
                removeContainer(context, container, List.of("Suspending host"), true));
    }

    /**
     * Suspends services on the node. Failures other than a missing container are logged and
     * swallowed — suspension is best-effort.
     */
    public void suspend(NodeAgentContext context) {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            containerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // Failed to suspend, for example because ssh connection failed. Try to run without suspending.
            context.log(logger, Level.WARNING, "Failed trying to suspend container", e);
        }
    }

    /**
     * Collects every reason the existing container must be removed (wrong state, wrong image,
     * not running, reboot wanted, wrong memory allocation, failed start). Empty list means keep it.
     */
    private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) {
        final NodeState nodeState = context.node().state();
        List<String> reasons = new ArrayList<>();
        if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned)
            reasons.add("Node in state " + nodeState + ", container should no longer be running");
        if (context.node().wantedDockerImage().isPresent() &&
                !context.node().wantedDockerImage().get().equals(existingContainer.image)) {
            reasons.add("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning())
            reasons.add("Container no longer running");
        if (currentRebootGeneration < context.node().wantedRebootGeneration()) {
            reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                                      currentRebootGeneration, context.node().wantedRebootGeneration()));
        }
        // Memory changes require recreating the container; CPU-only changes are handled in place.
        ContainerResources wantedContainerResources = getContainerResources(context);
        if (!wantedContainerResources.equalsMemory(existingContainer.resources)) {
            reasons.add("Container should be running with different memory allocation, wanted: "
                    + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory());
        }
        if (containerState == STARTING) reasons.add("Container failed to start");
        return reasons;
    }

    /**
     * Suspends (unless already suspended), stops services and removes the container.
     * Service-stopping failures are logged and ignored so removal always proceeds.
     */
    private void removeContainer(NodeAgentContext context, Container existingContainer,
                                 List<String> reasons, boolean alreadySuspended) {
        context.log(logger, "Will remove container: " + String.join(", ", reasons));
        if (existingContainer.state.isRunning()) {
            if (!alreadySuspended) {
                orchestratorSuspendNode(context);
            }
            try {
                if (context.node().state() != NodeState.dirty) {
                    suspend(context);
                }
                stopServices(context);
            } catch (Exception e) {
                context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e);
            }
        }
        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        containerOperations.removeContainer(context, existingContainer);
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
    }

    /**
     * Applies a CPU-allocation change in place, but only after the post-health-check warm-up
     * window has passed; memory stays as-is (memory changes go through container removal).
     *
     * @throws ConvergenceException if the container cannot be found right after updating
     */
    private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
        ContainerResources wantedContainerResources = getContainerResources(context);
        // Still warming up (or no successful health check yet): leave the container untouched.
        if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant
                .map(clock.instant().minus(warmUpDuration(context))::isBefore)
                .orElse(true))
            return existingContainer;
        if (wantedContainerResources.equalsCpu(existingContainer.resources)) return existingContainer;
        context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
                    wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu());
        containerOperations.updateContainer(context, existingContainer.id(),
                wantedContainerResources.withMemoryBytes(existingContainer.resources.memoryBytes()));
        return containerOperations.getContainer(context).orElseThrow(() ->
                new ConvergenceException("Did not find container that was just updated"));
    }

    /** CPU capping is disabled in dev environments and in CD systems. */
    private boolean noCpuCap(ZoneApi zone) {
        return zone.getEnvironment() == Environment.dev || zone.getSystemName().isCd();
    }

    /**
     * Kicks off an async image pull if the wanted image differs from the container's.
     *
     * @return true if a download is (still) in progress, i.e. the caller should wait
     */
    private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) {
        NodeSpec node = context.node();
        if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false;
        RegistryCredentials credentials = registryCredentialsProvider.get();
        return node.wantedDockerImage()
                .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials))
                .orElse(false);
    }

    /**
     * One tick: runs {@link #doConverge} and translates its failure modes.
     * ConvergenceException is an expected "not there yet" and is only logged; a vanished
     * container resets state; everything else (including Throwable) is counted and swallowed
     * so the tick loop survives.
     */
    public void converge(NodeAgentContext context) {
        try {
            doConverge(context);
            context.log(logger, Level.INFO, "Converged");
        } catch (ConvergenceException e) {
            context.log(logger, e.getMessage());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
            context.log(logger, Level.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
        } catch (DockerException e) {
            numberOfUnhandledException++;
            context.log(logger, Level.SEVERE, "Caught a DockerException", e);
        } catch (Throwable e) {
            numberOfUnhandledException++;
            context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e);
        }
    }

    /** The actual convergence logic, dispatching on the node's state in the node repository. */
    void doConverge(NodeAgentContext context) {
        NodeSpec node = context.node();
        Optional<Container> container = getContainer(context);
        // Pick up generation bumps (and log the diff) whenever the node spec changed.
        if (!node.equals(lastNode)) {
            logChangesToNodeSpec(context, lastNode, node);
            if (currentRebootGeneration < node.currentRebootGeneration())
                currentRebootGeneration = node.currentRebootGeneration();
            if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() ||
                    currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false))
                currentRestartGeneration = node.currentRestartGeneration();
            lastNode = node;
        }
        switch (node.state()) {
            case ready:
            case reserved:
            case failed:
            case inactive:
            case parked:
                // Inactive-ish states: no container should run; report attributes and stop services.
                removeContainerIfNeededUpdateContainerState(context, container);
                updateNodeRepoWithCurrentAttributes(context);
                stopServicesIfNeeded(context);
                break;
            case active:
                storageMaintainer.syncLogs(context, true);
                storageMaintainer.cleanDiskIfFull(context);
                storageMaintainer.handleCoreDumpsForContainer(context, container);
                if (downloadImageIfNeeded(context, container)) {
                    context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(context, container);
                credentialsMaintainers.forEach(maintainer -> maintainer.converge(context));
                if (container.isEmpty()) {
                    containerState = STARTING;
                    container = Optional.of(startContainer(context));
                    containerState = UNKNOWN;
                } else {
                    container = Optional.of(updateContainerIfNeeded(context, container.get()));
                }
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
                startServicesIfNeeded(context);
                resumeNodeIfNeeded(context);
                if (healthChecker.isPresent()) {
                    healthChecker.get().verifyHealth(context);
                    if (firstSuccessfulHealthCheckInstant.isEmpty())
                        firstSuccessfulHealthCheckInstant = Optional.of(clock.instant());
                    // CPU cap not yet applied means the warm-up window is still open: refuse to resume.
                    Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context)));
                    if (!container.get().resources.equalsCpu(getContainerResources(context)))
                        throw new ConvergenceException("Refusing to resume until warm up period ends (" +
                                (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")");
                }
                updateNodeRepoWithCurrentAttributes(context);
                if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) {
                    context.log(logger, "Call resume against Orchestrator");
                    orchestrator.resume(context.hostname().value());
                    suspendedInOrchestrator = false;
                }
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(context, container);
                context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready");
                credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context));
                storageMaintainer.syncLogs(context, false);
                storageMaintainer.archiveNodeStorage(context);
                updateNodeRepoWithCurrentAttributes(context);
                nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
                break;
            default:
                throw new ConvergenceException("UNKNOWN STATE " + node.state().name());
        }
    }

    /** Logs the fields that changed between the last and the current node spec (currently: state). */
    private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
        StringBuilder builder = new StringBuilder();
        appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state);
        if (builder.length() > 0) {
            context.log(logger, Level.INFO, "Changes to node: " + builder.toString());
        }
    }

    /** Renders a possibly-null field value for change logging. */
    private static <T> String fieldDescription(T value) {
        return value == null ? "[absent]" : value.toString();
    }

    /** Appends "name old -> new" to the builder when the field differs between the two specs. */
    private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode,
                                              NodeSpec newNode, Function<NodeSpec, T> getter) {
        T oldValue = oldNode == null ? null : getter.apply(oldNode);
        T newValue = getter.apply(newNode);
        if (!Objects.equals(oldValue, newValue)) {
            if (builder.length() > 0) {
                builder.append(", ");
            }
            builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
        }
    }

    /** Asks the container runtime for the container, keeping {@code containerState} in sync. */
    private Optional<Container> getContainer(NodeAgentContext context) {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = containerOperations.getContainer(context);
        if (container.isEmpty()) containerState = ABSENT;
        return container;
    }

    /** Returns the number of unhandled exceptions since the last call, and resets the counter. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Asks the Orchestrator for permission to suspend this node (no-op unless active).
     * On refusal, the ACLs are re-converged first — presumably because a stale ACL can be the
     * reason the Orchestrator is unreachable — before the exception is rethrown; confirm intent.
     */
    private void orchestratorSuspendNode(NodeAgentContext context) {
        if (context.node().state() != NodeState.active) return;
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        try {
            orchestrator.suspend(context.hostname().value());
            suspendedInOrchestrator = true;
        } catch (OrchestratorException e) {
            try {
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            } catch (RuntimeException suppressed) {
                logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed);
                e.addSuppressed(suppressed);
            }
            throw e;
        }
    }

    /**
     * Creates the container-data view used when creating a container. This default rejects all
     * writes (protected + throws), so concrete deployments are expected to override it.
     */
    protected ContainerData createContainerData(NodeAgentContext context) {
        return new ContainerData() {
            @Override
            public void addFile(Path pathInContainer, String data) {
                throw new UnsupportedOperationException("addFile not implemented");
            }
            @Override
            public void addDirectory(Path pathInContainer) {
                throw new UnsupportedOperationException("addDirectory not implemented");
            }
            @Override
            public void createSymlink(Path symlink, Path target) {
                throw new UnsupportedOperationException("createSymlink not implemented");
            }
        };
    }

    /** Exposes the credentials maintainers to subclasses. */
    protected List<CredentialsMaintainer> credentialsMaintainers() {
        return credentialsMaintainers;
    }

    /**
     * The warm-up duration for this node: negative (no warm-up) in CD systems, test
     * environments, for non-tenant nodes, and for memberships that are neither container
     * nor admin; otherwise the configured {@code warmUpDuration}.
     */
    private Duration warmUpDuration(NodeAgentContext context) {
        ZoneApi zone = context.zone();
        Optional<NodeMembership> membership = context.node().membership();
        return zone.getSystemName().isCd() || zone.getEnvironment().isTest() ||
               context.nodeType() != NodeType.tenant ||
               membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false) ?
               Duration.ofSeconds(-1) : warmUpDuration;
    }
}
/**
 * Drives a single node's container towards the state wanted by the node repository.
 *
 * <p>A dedicated tick thread repeatedly pulls the next {@code NodeAgentContext} from the
 * supplier and runs {@link #converge(NodeAgentContext)}. Convergence is best-effort: each
 * tick re-inspects the actual container state and applies the minimal set of actions
 * (pull image, create/start/remove container, start/stop services, suspend/resume in the
 * Orchestrator) needed to match the node spec.
 *
 * <p>NOTE(review): mutable state below (containerState, generations, flags) is only
 * touched from the tick thread plus the stop methods — presumably single-threaded by
 * design; confirm before adding callers.
 */
class NodeAgentImpl implements NodeAgent {

    // Default CPU warm-up window after first successful health check.
    // NOTE(review): 90s minus 1s = 89s — the minus(1s) looks deliberate (strict "less
    // than 90s" comparison somewhere?) but is not explained here; confirm intent.
    private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1));

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // Collaborators, all injected via constructor.
    private final NodeAgentContextSupplier contextSupplier;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final ContainerOperations containerOperations;
    private final RegistryCredentialsProvider registryCredentialsProvider;
    private final StorageMaintainer storageMaintainer;
    private final List<CredentialsMaintainer> credentialsMaintainers;
    private final Optional<AclMaintainer> aclMaintainer;
    private final Optional<HealthChecker> healthChecker;
    private final Clock clock;
    private final Duration warmUpDuration;
    // NOTE(review): not referenced anywhere in this excerpt — presumably read by a
    // getContainerResources(...) method that is not visible here; verify in full file.
    private final DoubleFlag containerCpuCap;

    private Thread loopThread;

    // Last known container state; see ContainerState javadoc for the semantics.
    private ContainerState containerState = UNKNOWN;

    // Node spec seen last tick, used to log diffs and pick up generation bumps.
    private NodeSpec lastNode;
    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Whether the optional node resume command has been run since last (re)start.
    private boolean hasResumedNode = false;
    // True by default so the first tick does not redundantly start services.
    private boolean hasStartedServices = true;

    // Set when the health checker first reports healthy; anchors the warm-up window.
    private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty();
    // True while we believe the node is suspended in the Orchestrator by us.
    private boolean suspendedInOrchestrator = false;

    private int numberOfUnhandledException = 0;
    // Reboot/restart generations this agent has acted on; compared against the spec's
    // wanted generations to decide whether a reboot/restart is still pending.
    private long currentRebootGeneration = 0;
    private Optional<Long> currentRestartGeneration = Optional.empty();

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    /** Convenience constructor using {@link #DEFAULT_WARM_UP_DURATION}. */
    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider,
                         StorageMaintainer storageMaintainer, FlagSource flagSource,
                         List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker,
                         Clock clock) {
        this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider,
             storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock,
             DEFAULT_WARM_UP_DURATION);
    }

    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider,
                         StorageMaintainer storageMaintainer, FlagSource flagSource,
                         List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker,
                         Clock clock, Duration warmUpDuration) {
        this.contextSupplier = contextSupplier;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.containerOperations = containerOperations;
        this.registryCredentialsProvider = registryCredentialsProvider;
        this.storageMaintainer = storageMaintainer;
        this.credentialsMaintainers = credentialsMaintainers;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        this.clock = clock;
        this.warmUpDuration = warmUpDuration;
        this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource);
    }

    /**
     * Starts the tick thread. May only be called once; the thread loops until
     * {@link #stopForRemoval(NodeAgentContext)} flips {@code terminated}.
     */
    @Override
    public void start(NodeAgentContext initialContext) {
        if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent.");
        loopThread = new Thread(() -> {
            while (!terminated.get()) {
                try {
                    // Blocks until the next tick's context is available.
                    NodeAgentContext context = contextSupplier.nextContext();
                    converge(context);
                } catch (InterruptedException ignored) { } // interrupt() is used only to re-check terminated
            }
        });
        loopThread.setName("tick-" + initialContext.hostname());
        loopThread.start();
    }

    /** Stops the tick thread permanently. May only be called once. */
    @Override
    public void stopForRemoval(NodeAgentContext context) {
        if (!terminated.compareAndSet(false, true))
            throw new IllegalStateException("Can not re-stop a node agent.");
        contextSupplier.interrupt();
        // join() can itself be interrupted; loop until the tick thread is really gone.
        do {
            try {
                loopThread.join();
            } catch (InterruptedException ignored) { }
        } while (loopThread.isAlive());
        context.log(logger, "Stopped");
    }

    /** Starts services inside the container unless already started this lifecycle. */
    void startServicesIfNeeded(NodeAgentContext context) {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            containerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /** Runs the node's optional resume program once per container lifecycle. */
    void resumeNodeIfNeeded(NodeAgentContext context) {
        if (!hasResumedNode) {
            context.log(logger, Level.FINE, "Starting optional node program resume command");
            containerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Reports this agent's view of restart/reboot generation and docker image back to the
     * node repository, but only for attributes that differ from the spec's current values.
     */
    private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();

        if (context.node().wantedRestartGeneration().isPresent() &&
                !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) {
            currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration());
            newNodeAttributes.withRestartGeneration(currentRestartGeneration);
        }

        if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) {
            currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration());
            newNodeAttributes.withRebootGeneration(currentRebootGeneration);
        }

        // Only report the wanted image as running when containerState is UNKNOWN, i.e.
        // when the container may actually be running it.
        Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) {
            DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY);
            DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY);
            currentNodeAttributes.withDockerImage(currentImage);
            currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion());
            newNodeAttributes.withDockerImage(newImage);
            newNodeAttributes.withVespaVersion(newImage.tagAsVersion());
        }

        publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
    }

    /** Writes the new attributes to the node repo only if they differ from the current ones. */
    private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                        currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /**
     * Creates and starts the container, recording the wanted reboot/restart generations
     * as acted upon. CPU is left uncapped while a warm-up period applies.
     *
     * @throws ConvergenceException if the container cannot be found right after starting
     */
    private Container startContainer(NodeAgentContext context) {
        ContainerData containerData = createContainerData(context);
        ContainerResources wantedResources = warmUpDuration(context).isNegative() ?
                getContainerResources(context) : getContainerResources(context).withUnlimitedCpus();
        containerOperations.createContainer(context, containerData, wantedResources);
        containerOperations.startContainer(context);

        currentRebootGeneration = context.node().wantedRebootGeneration();
        currentRestartGeneration = context.node().wantedRestartGeneration();
        hasStartedServices = true; // Because container is started with start command
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
        return containerOperations.getContainer(context).orElseThrow(() ->
                new ConvergenceException("Did not find container that was just started"));
    }

    /**
     * Removes the container if any removal reason applies, otherwise restarts services
     * in-place when the wanted restart generation has been bumped.
     *
     * @return the (possibly unchanged) container, or empty if it was removed
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeAgentContext context, Optional<Container> existingContainer) {
        if (existingContainer.isPresent()) {
            List<String> reasons = shouldRemoveContainer(context, existingContainer.get());
            if (!reasons.isEmpty()) {
                removeContainer(context, existingContainer.get(), reasons, false);
                return Optional.empty();
            }

            shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> {
                context.log(logger, "Will restart services: " + restartReason);
                orchestratorSuspendNode(context);
                containerOperations.restartVespa(context);
                currentRestartGeneration = context.node().wantedRestartGeneration();
            });
        }
        return existingContainer;
    }

    /**
     * Returns a reason to restart services, or empty if none.
     * NOTE(review): the Optional.get() calls assume both current and wanted restart
     * generations are present whenever the node is active and the container runs —
     * presumably guaranteed by how currentRestartGeneration is synced in doConverge;
     * confirm, otherwise this can throw NoSuchElementException.
     */
    private Optional<String> shouldRestartServices(NodeAgentContext context, Container existingContainer) {
        NodeSpec node = context.node();
        if (!existingContainer.state.isRunning() || node.state() != NodeState.active) return Optional.empty();

        // Restart generation is only bumped on baseline configuration changes
        if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Stops services when the node no longer has an owner (application removed). */
    private void stopServicesIfNeeded(NodeAgentContext context) {
        if (hasStartedServices && context.node().owner().isEmpty())
            stopServices(context);
    }

    /** Stops services inside the container and resets service/health bookkeeping. */
    private void stopServices(NodeAgentContext context) {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            firstSuccessfulHealthCheckInstant = Optional.empty();
            containerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /** Removes the container as part of suspending the whole host (already suspended in Orchestrator). */
    @Override
    public void stopForHostSuspension(NodeAgentContext context) {
        getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true));
    }

    /** Suspends services on the node; failures other than a missing container are logged and ignored. */
    public void suspend(NodeAgentContext context) {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            containerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // It's bad to continue as-if nothing happened, but on the other hand if we do not proceed to
            // remove container, we will not be able to upgrade to fix any problems in the suspend logic!
            context.log(logger, Level.WARNING, "Failed trying to suspend container", e);
        }
    }

    /** Returns all reasons why the existing container must be removed (empty list = keep it). */
    private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) {
        final NodeState nodeState = context.node().state();
        List<String> reasons = new ArrayList<>();
        if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned)
            reasons.add("Node in state " + nodeState + ", container should no longer be running");

        if (context.node().wantedDockerImage().isPresent() &&
                !context.node().wantedDockerImage().get().equals(existingContainer.image)) {
            reasons.add("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString());
        }

        if (!existingContainer.state.isRunning())
            reasons.add("Container no longer running");

        if (currentRebootGeneration < context.node().wantedRebootGeneration()) {
            reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    currentRebootGeneration, context.node().wantedRebootGeneration()));
        }

        // Memory changes require container re-creation; CPU changes are applied in-place
        // by updateContainerIfNeeded.
        ContainerResources wantedContainerResources = getContainerResources(context);
        if (!wantedContainerResources.equalsMemory(existingContainer.resources)) {
            reasons.add("Container should be running with different memory allocation, wanted: " +
                    wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory());
        }

        if (containerState == STARTING) reasons.add("Container failed to start");
        return reasons;
    }

    /**
     * Removes the container, first suspending the node in the Orchestrator (unless the
     * host is already suspended) and stopping services on a best-effort basis.
     */
    private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) {
        context.log(logger, "Will remove container: " + String.join(", ", reasons));

        if (existingContainer.state.isRunning()) {
            if (!alreadySuspended) {
                orchestratorSuspendNode(context);
            }

            try {
                if (context.node().state() != NodeState.dirty) {
                    suspend(context);
                }
                stopServices(context);
            } catch (Exception e) {
                context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e);
            }
        }

        // Salvage any core dumps before the container (and its storage view) goes away.
        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        containerOperations.removeContainer(context, existingContainer);
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
    }

    /**
     * Applies an in-place CPU allocation update once the post-health-check warm-up
     * window has passed (memory changes are handled by container removal instead).
     *
     * @throws ConvergenceException if the container cannot be found right after updating
     */
    private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
        ContainerResources wantedContainerResources = getContainerResources(context);

        // Defer CPU updates while still warming up (or before the first healthy check).
        if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant
                .map(clock.instant().minus(warmUpDuration(context))::isBefore)
                .orElse(true))
            return existingContainer;

        if (wantedContainerResources.equalsCpu(existingContainer.resources)) return existingContainer;
        context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
                wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu());

        containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources.memoryBytes()));
        return containerOperations.getContainer(context).orElseThrow(() ->
                new ConvergenceException("Did not find container that was just updated"));
    }

    /** Returns whether CPU capping is disabled for this zone (dev or CD systems). */
    private boolean noCpuCap(ZoneApi zone) {
        return zone.getEnvironment() == Environment.dev || zone.getSystemName().isCd();
    }

    /**
     * Kicks off an async image pull if the wanted image differs from the running one.
     *
     * @return true if a download is (still) in progress, i.e. convergence must wait
     */
    private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) {
        NodeSpec node = context.node();
        if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false;

        RegistryCredentials credentials = registryCredentialsProvider.get();
        return node.wantedDockerImage()
                .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials))
                .orElse(false);
    }

    /**
     * Runs one convergence tick, translating failures into logs and counters so the tick
     * loop never dies. ConvergenceException is the expected "not done yet" signal.
     */
    public void converge(NodeAgentContext context) {
        try {
            doConverge(context);
            context.log(logger, Level.INFO, "Converged");
        } catch (ConvergenceException e) {
            context.log(logger, e.getMessage());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
            context.log(logger, Level.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
        } catch (DockerException e) {
            numberOfUnhandledException++;
            context.log(logger, Level.SEVERE, "Caught a DockerException", e);
        } catch (Throwable e) {
            numberOfUnhandledException++;
            context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e);
        }
    }

    /**
     * The actual convergence logic, dispatching on the node's state in the node repo.
     * Syncs this agent's generation bookkeeping with the spec before acting.
     */
    void doConverge(NodeAgentContext context) {
        NodeSpec node = context.node();
        Optional<Container> container = getContainer(context);

        // On a changed spec: log the diff and adopt any generations already recorded as
        // current by the node repo (e.g. after this agent restarted).
        if (!node.equals(lastNode)) {
            logChangesToNodeSpec(context, lastNode, node);

            if (currentRebootGeneration < node.currentRebootGeneration())
                currentRebootGeneration = node.currentRebootGeneration();

            if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() ||
                    currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false))
                currentRestartGeneration = node.currentRestartGeneration();

            lastNode = node;
        }

        switch (node.state()) {
            case ready:
            case reserved:
            case failed:
            case inactive:
            case parked:
                // Inactive-ish states: no container should run, but storage is kept.
                removeContainerIfNeededUpdateContainerState(context, container);
                updateNodeRepoWithCurrentAttributes(context);
                stopServicesIfNeeded(context);
                break;
            case active:
                storageMaintainer.syncLogs(context, true);
                storageMaintainer.cleanDiskIfFull(context);
                storageMaintainer.handleCoreDumpsForContainer(context, container);

                if (downloadImageIfNeeded(context, container)) {
                    context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString());
                    return;
                }

                container = removeContainerIfNeededUpdateContainerState(context, container);
                credentialsMaintainers.forEach(maintainer -> maintainer.converge(context));
                if (container.isEmpty()) {
                    // STARTING marks the attempt; only reset to UNKNOWN on success so a
                    // failed start is detected next tick (see shouldRemoveContainer).
                    containerState = STARTING;
                    container = Optional.of(startContainer(context));
                    containerState = UNKNOWN;
                } else {
                    container = Optional.of(updateContainerIfNeeded(context, container.get()));
                }

                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));

                startServicesIfNeeded(context);
                resumeNodeIfNeeded(context);
                if (healthChecker.isPresent()) {
                    healthChecker.get().verifyHealth(context);
                    if (firstSuccessfulHealthCheckInstant.isEmpty())
                        firstSuccessfulHealthCheckInstant = Optional.of(clock.instant());

                    // Refuse to resume in the Orchestrator while CPU is still uncapped
                    // for warm-up; resources converge once the window ends.
                    Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context)));
                    if (!container.get().resources.equalsCpu(getContainerResources(context)))
                        throw new ConvergenceException("Refusing to resume until warm up period ends (" +
                                (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")");
                }
                updateNodeRepoWithCurrentAttributes(context);

                if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) {
                    context.log(logger, "Call resume against Orchestrator");
                    orchestrator.resume(context.hostname().value());
                    suspendedInOrchestrator = false;
                }
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(context, container);
                context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready");
                credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context));
                storageMaintainer.syncLogs(context, false);
                storageMaintainer.archiveNodeStorage(context);
                updateNodeRepoWithCurrentAttributes(context);
                nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
                break;
            default:
                throw new ConvergenceException("UNKNOWN STATE " + node.state().name());
        }
    }

    /** Logs a human-readable diff between the previous and the current node spec. */
    private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
        StringBuilder builder = new StringBuilder();
        appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state);
        if (builder.length() > 0) {
            context.log(logger, Level.INFO, "Changes to node: " + builder.toString());
        }
    }

    /** Renders a possibly-null field value for diff logging. */
    private static <T> String fieldDescription(T value) {
        return value == null ? "[absent]" : value.toString();
    }

    /** Appends "name old -> new" to the builder if the extracted field differs between specs. */
    private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
        T oldValue = oldNode == null ? null : getter.apply(oldNode);
        T newValue = getter.apply(newNode);
        if (!Objects.equals(oldValue, newValue)) {
            if (builder.length() > 0) {
                builder.append(", ");
            }
            builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
        }
    }

    /** Queries the container runtime unless we already know the container is absent. */
    private Optional<Container> getContainer(NodeAgentContext context) {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = containerOperations.getContainer(context);
        if (container.isEmpty()) containerState = ABSENT;
        return container;
    }

    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Asks the Orchestrator for permission to suspend an active node. On denial, ACLs are
     * refreshed (best effort) so the Orchestrator stays reachable, then the exception is
     * rethrown to abort this tick.
     */
    private void orchestratorSuspendNode(NodeAgentContext context) {
        if (context.node().state() != NodeState.active) return;

        context.log(logger, "Ask Orchestrator for permission to suspend node");
        try {
            orchestrator.suspend(context.hostname().value());
            suspendedInOrchestrator = true;
        } catch (OrchestratorException e) {
            // Ensure the ACLs are up to date: The reason we're unable to suspend may be because some other
            // node is suspended, and that other node may be unreachable from the Orchestrator without
            // up-to-date ACLs on this node.
            try {
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            } catch (RuntimeException suppressed) {
                logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed);
                e.addSuppressed(suppressed);
            }
            throw e;
        }
    }

    /** Hook for subclasses to seed the container filesystem; the default supports nothing. */
    protected ContainerData createContainerData(NodeAgentContext context) {
        return new ContainerData() {
            @Override
            public void addFile(Path pathInContainer, String data) {
                throw new UnsupportedOperationException("addFile not implemented");
            }

            @Override
            public void addDirectory(Path pathInContainer) {
                throw new UnsupportedOperationException("addDirectory not implemented");
            }

            @Override
            public void createSymlink(Path symlink, Path target) {
                throw new UnsupportedOperationException("createSymlink not implemented");
            }
        };
    }

    protected List<CredentialsMaintainer> credentialsMaintainers() {
        return credentialsMaintainers;
    }

    /**
     * Returns the warm-up duration for this node, or a negative duration (meaning "no
     * warm-up") for CD systems, test environments, non-tenant nodes, and tenant nodes
     * whose cluster type is neither container nor admin.
     */
    private Duration warmUpDuration(NodeAgentContext context) {
        ZoneApi zone = context.zone();
        Optional<NodeMembership> membership = context.node().membership();
        return zone.getSystemName().isCd()
                || zone.getEnvironment().isTest()
                || context.nodeType() != NodeType.tenant
                || membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false)
                ? Duration.ofSeconds(-1)
                : warmUpDuration;
    }

}
It is probably better to use a switch statement instead of an if-else-if chain here, and likewise in the similar places elsewhere in this class.
/**
 * Appends the dense values of an indexed tensor block to the given builder, in the
 * standard value order, with nested brackets opened/closed as dimensions start and end.
 *
 * <p>Values are rendered per the tensor's value type: DOUBLE uses {@code get} (full
 * precision), while FLOAT, BFLOAT16 and INT8 all use {@code getFloat}.
 *
 * @param tensor  the tensor whose values are rendered
 * @param indexes iteration state over the tensor's dimensions; advanced by this method
 * @param b       the builder to append to
 * @throws IllegalStateException if the tensor has an unexpected value type
 */
static void indexedBlockToString(IndexedTensor tensor, Indexes indexes, StringBuilder b) {
    for (int index = 0; index < tensor.size(); index++) {
        indexes.next();

        // Open one bracket per dimension starting at this position
        for (int i = 0; i < indexes.nextDimensionsAtStart(); i++)
            b.append("[");

        // Switch on the enum rather than chaining if-else-if: clearer, and the
        // three float-backed types share one branch
        switch (tensor.type().valueType()) {
            case DOUBLE:
                b.append(tensor.get(index));
                break;
            case FLOAT:
            case BFLOAT16:
            case INT8:
                b.append(tensor.getFloat(index));
                break;
            default:
                throw new IllegalStateException("Unexpected value type " + tensor.type().valueType());
        }

        // Close one bracket per dimension ending at this position
        for (int i = 0; i < indexes.nextDimensionsAtEnd(); i++)
            b.append("]");

        if (index < tensor.size() - 1)
            b.append(", ");
    }
}
b.append(tensor.getFloat(index));
/**
 * Writes the values of an indexed tensor block into {@code b} in the standard value
 * order, bracketing sub-blocks as dimensions begin and end.
 *
 * <p>DOUBLE tensors are printed via {@code get}; FLOAT, BFLOAT16 and INT8 tensors all
 * print via {@code getFloat}. Any other value type is a programming error.
 */
static void indexedBlockToString(IndexedTensor tensor, Indexes indexes, StringBuilder b) {
    long lastIndex = tensor.size() - 1;
    for (int cell = 0; cell < tensor.size(); cell++) {
        indexes.next();

        int opening = indexes.nextDimensionsAtStart();
        for (int d = 0; d < opening; d++)
            b.append("[");

        TensorType.Value valueType = tensor.type().valueType();
        switch (valueType) {
            case DOUBLE:
                b.append(tensor.get(cell));
                break;
            case FLOAT:
                b.append(tensor.getFloat(cell));
                break;
            case BFLOAT16:
                b.append(tensor.getFloat(cell));
                break;
            case INT8:
                b.append(tensor.getFloat(cell));
                break;
            default:
                throw new IllegalStateException("Unexpected value type " + valueType);
        }

        int closing = indexes.nextDimensionsAtEnd();
        for (int d = 0; d < closing; d++)
            b.append("]");

        if (cell < lastIndex)
            b.append(", ");
    }
}
class IndexedTensor implements Tensor { /** The prescribed and possibly abstract type this is an instance of */ private final TensorType type; /** The sizes of the dimensions of this in the order of the dimensions of the type */ private final DimensionSizes dimensionSizes; IndexedTensor(TensorType type, DimensionSizes dimensionSizes) { this.type = type; this.dimensionSizes = dimensionSizes; } /** * Returns an iterator over the cells of this in the <i>standard value order</i>. */ @Override public Iterator<Cell> cellIterator() { return new CellIterator(); } /** Returns an iterator over all the cells in this tensor which matches the given partial address */ public SubspaceIterator cellIterator(PartialAddress partialAddress, DimensionSizes iterationSizes) { long[] startAddress = new long[type().dimensions().size()]; List<Integer> iterateDimensions = new ArrayList<>(); for (int i = 0; i < type().dimensions().size(); i++) { long partialAddressLabel = partialAddress.numericLabel(type.dimensions().get(i).name()); if (partialAddressLabel >= 0) startAddress[i] = partialAddressLabel; else iterateDimensions.add(i); } return new SubspaceIterator(iterateDimensions, startAddress, iterationSizes); } /** Returns an iterator over the values of this returned in the <i>standard value order</i> */ @Override public Iterator<Double> valueIterator() { return new ValueIterator(); } /** * Returns an iterator over value iterators where the outer iterator is over each unique value of the dimensions * given and the inner iterator is over each unique value of the rest of the dimensions, in the * <i>standard value order</i> * * @param dimensions the names of the dimensions of the superspace * @param sizes the size of each dimension in the space we are returning values for, containing * one value per dimension of this tensor (in order). 
Each size may be the same or smaller * than the corresponding size of this tensor */ public Iterator<SubspaceIterator> subspaceIterator(Set<String> dimensions, DimensionSizes sizes) { return new SuperspaceIterator(dimensions, sizes); } /** Returns a subspace iterator having the sizes of the dimensions of this tensor */ public Iterator<SubspaceIterator> subspaceIterator(Set<String> dimensions) { return subspaceIterator(dimensions, dimensionSizes); } /** * Returns the value at the given indexes as a double * * @param indexes the indexes into the dimensions of this. Must be one number per dimension of this * @throws IllegalArgumentException if any of the indexes are out of bound or a wrong number of indexes are given */ public double get(long ... indexes) { return get((int)toValueIndex(indexes, dimensionSizes)); } /** * Returns the value at the given indexes as a float * * @param indexes the indexes into the dimensions of this. Must be one number per dimension of this * @throws IllegalArgumentException if any of the indexes are out of bound or a wrong number of indexes are given */ public float getFloat(long ... indexes) { return getFloat((int)toValueIndex(indexes, dimensionSizes)); } /** Returns the value at this address, or NaN if there is no value at this address */ @Override public double get(TensorAddress address) { try { return get((int)toValueIndex(address, dimensionSizes, type)); } catch (IllegalArgumentException e) { return Double.NaN; } } /** * Returns the value at the given <i>standard value order</i> index as a double. * * @param valueIndex the direct index into the underlying data. * @throws IllegalArgumentException if index is out of bounds */ public abstract double get(long valueIndex); /** * Returns the value at the given <i>standard value order</i> index as a float. * * @param valueIndex the direct index into the underlying data. 
* @throws IllegalArgumentException if index is out of bounds */ public abstract float getFloat(long valueIndex); static long toValueIndex(long[] indexes, DimensionSizes sizes) { if (indexes.length == 1) return indexes[0]; if (indexes.length == 0) return 0; long valueIndex = 0; for (int i = 0; i < indexes.length; i++) { if (indexes[i] >= sizes.size(i)) throw new IllegalArgumentException(Arrays.toString(indexes) + " are not within bounds"); valueIndex += productOfDimensionsAfter(i, sizes) * indexes[i]; } return valueIndex; } static long toValueIndex(TensorAddress address, DimensionSizes sizes, TensorType type) { if (address.isEmpty()) return 0; long valueIndex = 0; for (int i = 0; i < address.size(); i++) { if (address.numericLabel(i) >= sizes.size(i)) throw new IllegalArgumentException(address + " is not within the bounds of " + type); valueIndex += productOfDimensionsAfter(i, sizes) * address.numericLabel(i); } return valueIndex; } private static long productOfDimensionsAfter(int afterIndex, DimensionSizes sizes) { long product = 1; for (int i = afterIndex + 1; i < sizes.dimensions(); i++) product *= sizes.size(i); return product; } void throwOnIncompatibleType(TensorType type) { if ( ! 
this.type().isRenamableTo(type)) throw new IllegalArgumentException("Can not change type from " + this.type() + " to " + type + ": Types are not compatible"); } @Override public TensorType type() { return type; } @Override public abstract IndexedTensor withType(TensorType type); public DimensionSizes dimensionSizes() { return dimensionSizes; } @Override public Map<TensorAddress, Double> cells() { if (dimensionSizes.dimensions() == 0) return Collections.singletonMap(TensorAddress.of(), get(0)); ImmutableMap.Builder<TensorAddress, Double> builder = new ImmutableMap.Builder<>(); Indexes indexes = Indexes.of(dimensionSizes, dimensionSizes, size()); for (long i = 0; i < size(); i++) { indexes.next(); builder.put(indexes.toAddress(), get(i)); } return builder.build(); } @Override public Tensor remove(Set<TensorAddress> addresses) { throw new IllegalArgumentException("Remove is not supported for indexed tensors"); } @Override public String toString() { if (type.rank() == 0) return Tensor.toStandardString(this); if (type.dimensions().stream().anyMatch(d -> d.size().isEmpty())) return Tensor.toStandardString(this); Indexes indexes = Indexes.of(dimensionSizes); StringBuilder b = new StringBuilder(type.toString()).append(":"); indexedBlockToString(this, indexes, b); return b.toString(); } @Override public boolean equals(Object other) { if ( ! ( other instanceof Tensor)) return false; return Tensor.equals(this, ((Tensor)other)); } public abstract static class Builder implements Tensor.Builder { final TensorType type; private Builder(TensorType type) { this.type = type; } public static Builder of(TensorType type) { if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension)) return of(type, BoundBuilder.dimensionSizesOf(type)); else return new UnboundBuilder(type); } /** * Creates a builder initialized with the given values * * @param type the type of the tensor to build * @param values the initial values of the tensor. 
This <b>transfers ownership</b> of the value array - it * must not be further mutated by the caller */ public static Builder of(TensorType type, float[] values) { if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension)) return of(type, BoundBuilder.dimensionSizesOf(type), values); else return new UnboundBuilder(type); } /** * Creates a builder initialized with the given values * * @param type the type of the tensor to build * @param values the initial values of the tensor. This <b>transfers ownership</b> of the value array - it * must not be further mutated by the caller */ public static Builder of(TensorType type, double[] values) { if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension)) return of(type, BoundBuilder.dimensionSizesOf(type), values); else return new UnboundBuilder(type); } /** * Create a builder with dimension size information for this instance. Must be one size entry per dimension, * and, agree with the type size information when specified in the type. * If sizes are completely specified in the type this size information is redundant. */ public static Builder of(TensorType type, DimensionSizes sizes) { validate(type, sizes); if (type.valueType() == TensorType.Value.FLOAT) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes); else if (type.valueType() == TensorType.Value.BFLOAT16) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes); else if (type.valueType() == TensorType.Value.INT8) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes); else if (type.valueType() == TensorType.Value.DOUBLE) return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes); else return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes); } /** * Creates a builder initialized with the given values * * @param type the type of the tensor to build * @param values the initial values of the tensor in the <i>standard value order</i>. 
* This <b>transfers ownership</b> of the value array - it * must not be further mutated by the caller */ public static Builder of(TensorType type, DimensionSizes sizes, float[] values) { validate(type, sizes); validateSizes(sizes, values.length); if (type.valueType() == TensorType.Value.FLOAT) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes, values); else if (type.valueType() == TensorType.Value.BFLOAT16) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.INT8) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.DOUBLE) return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes).fill(values); else return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes).fill(values); } /** * Creates a builder initialized with the given values * * @param type the type of the tensor to build * @param values the initial values of the tensor in the <i>standard value order</i>. 
* This <b>transfers ownership</b> of the value array - it * must not be further mutated by the caller */ public static Builder of(TensorType type, DimensionSizes sizes, double[] values) { validate(type, sizes); validateSizes(sizes, values.length); if (type.valueType() == TensorType.Value.FLOAT) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.BFLOAT16) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.INT8) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.DOUBLE) return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes, values); else return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes, values); } private static void validateSizes(DimensionSizes sizes, int length) { if (sizes.totalSize() != length) { throw new IllegalArgumentException("Invalid size(" + length + ") of supplied value vector." + " Type specifies that size should be " + sizes.totalSize()); } } private static void validate(TensorType type, DimensionSizes sizes) { if (sizes.dimensions() != type.dimensions().size()) throw new IllegalArgumentException(sizes.dimensions() + " is the wrong number of dimensions for " + type); for (int i = 0; i < sizes.dimensions(); i++ ) { Optional<Long> size = type.dimensions().get(i).size(); if (size.isPresent() && size.get() < sizes.size(i)) throw new IllegalArgumentException("Size of dimension " + type.dimensions().get(i).name() + " is " + sizes.size(i) + " but cannot be larger than " + size.get() + " in " + type); } } public abstract Builder cell(double value, long ... indexes); public abstract Builder cell(float value, long ... 
indexes); @Override public TensorType type() { return type; } @Override public abstract IndexedTensor build(); } public interface DirectIndexBuilder { TensorType type(); /** Sets a value by its <i>standard value order</i> index */ void cellByDirectIndex(long index, double value); /** Sets a value by its <i>standard value order</i> index */ void cellByDirectIndex(long index, float value); } /** A bound builder can create the double array directly */ public static abstract class BoundBuilder extends Builder implements DirectIndexBuilder { private DimensionSizes sizes; private static DimensionSizes dimensionSizesOf(TensorType type) { DimensionSizes.Builder b = new DimensionSizes.Builder(type.dimensions().size()); for (int i = 0; i < type.dimensions().size(); i++) b.set(i, type.dimensions().get(i).size().get()); return b.build(); } BoundBuilder(TensorType type, DimensionSizes sizes) { super(type); if ( sizes.dimensions() != type.dimensions().size()) throw new IllegalArgumentException("Must have a dimension size entry for each dimension in " + type); this.sizes = sizes; } BoundBuilder fill(float[] values) { long index = 0; for (float value : values) { cellByDirectIndex(index++, value); } return this; } BoundBuilder fill(double[] values) { long index = 0; for (double value : values) { cellByDirectIndex(index++, value); } return this; } DimensionSizes sizes() { return sizes; } } /** * A builder used when we don't know the size of the dimensions up front. * All values is all dimensions must be specified. 
*/
private static class UnboundBuilder extends Builder {

    /** List of List or Double */
    private List<Object> firstDimension = null;

    private UnboundBuilder(TensorType type) {
        super(type);
    }

    @Override
    public IndexedTensor build() {
        if (firstDimension == null) throw new IllegalArgumentException("Tensor of type " + type() + " has no values");

        // Rank 0: the single value was stored as a one-element list by cell()
        if (type.dimensions().isEmpty())
            return new IndexedDoubleTensor(type, new DimensionSizes.Builder(type.dimensions().size()).build(), new double[] {(Double) firstDimension.get(0) });

        // Derive the concrete sizes from the values that were added, then flatten the
        // nested lists into a dense value array in the standard value order
        DimensionSizes dimensionSizes = findDimensionSizes(firstDimension);
        double[] values = new double[(int)dimensionSizes.totalSize()];
        fillValues(0, 0, firstDimension, dimensionSizes, values);
        return new IndexedDoubleTensor(type, dimensionSizes, values);
    }

    /** Returns the observed size of each dimension, derived from the nested value lists. */
    private DimensionSizes findDimensionSizes(List<Object> firstDimension) {
        List<Long> dimensionSizeList = new ArrayList<>(type.dimensions().size());
        findDimensionSizes(0, dimensionSizeList, firstDimension);
        DimensionSizes.Builder b = new DimensionSizes.Builder(type.dimensions().size());
        for (int i = 0; i < b.dimensions(); i++) {
            if (i < dimensionSizeList.size())
                b.set(i, dimensionSizeList.get(i));
        }
        return b.build();
    }

    // Walks the nested lists depth-first, recording the size of each dimension the first
    // time it is seen, and throwing if a later sublist at the same depth has another size.
    @SuppressWarnings("unchecked")
    private void findDimensionSizes(int currentDimensionIndex, List<Long> dimensionSizes, List<Object> currentDimension) {
        if (currentDimensionIndex == dimensionSizes.size())
            dimensionSizes.add((long)currentDimension.size());
        else if (dimensionSizes.get(currentDimensionIndex) != currentDimension.size())
            throw new IllegalArgumentException("Missing values in dimension " + type.dimensions().get(currentDimensionIndex) + " in " + type);
        for (Object value : currentDimension)
            if (value instanceof List)
                findDimensionSizes(currentDimensionIndex + 1, dimensionSizes, (List<Object>)value);
    }

    // Copies the nested value lists into the dense values array;
    // offset is the value index of the first cell of the current subspace.
    @SuppressWarnings("unchecked")
    private void fillValues(int currentDimensionIndex, long offset, List<Object> currentDimension,
                            DimensionSizes sizes, double[] values) {
        if (currentDimensionIndex < sizes.dimensions() - 1) { // recurse into the next dimension
            for (long i = 0; i < currentDimension.size(); i++)
                fillValues(currentDimensionIndex + 1,
                           offset + productOfDimensionsAfter(currentDimensionIndex, sizes) * i,
                           (List<Object>) currentDimension.get((int)i), sizes, values);
        }
        else { // innermost dimension: copy the leaf values
            for (long i = 0; i < currentDimension.size(); i++) {
                values[(int)(offset + i)] = nullAsZero((Double)currentDimension.get((int)i));
            }
        }
    }

    /** Cells which were never set default to 0. */
    private double nullAsZero(Double value) {
        if (value == null) return 0;
        return value;
    }

    @Override
    public CellBuilder cell() {
        return new CellBuilder(type, this);
    }

    @Override
    public Builder cell(TensorAddress address, float value) {
        return cell(address, (double)value);
    }

    @Override
    public Builder cell(TensorAddress address, double value) {
        long[] indexes = new long[address.size()];
        for (int i = 0; i < address.size(); i++) {
            indexes[i] = address.numericLabel(i);
        }
        cell(value, indexes);
        return this;
    }

    @Override
    public Builder cell(float value, long... indexes) {
        return cell((double)value, indexes);
    }

    /**
     * Set a value using an index API. The number of indexes must be the same as the dimensions in the type of this.
     * Values can be written in any order but all values needed to make this dense must be provided
     * before building this.
     *
     * @return this for chaining
     */
    @SuppressWarnings("unchecked")
    @Override
    public Builder cell(double value, long... indexes) {
        if (indexes.length != type.dimensions().size())
            throw new IllegalArgumentException("Wrong number of indexes (" + indexes.length + ") for " + type);

        if (indexes.length == 0) { // rank 0: store the single value directly
            firstDimension = Collections.singletonList(value);
            return this;
        }

        if (firstDimension == null)
            firstDimension = new ArrayList<>();
        List<Object> currentValues = firstDimension;
        // Walk (and grow) the nested lists down to the innermost dimension, then set the value there
        for (int dimensionIndex = 0; dimensionIndex < indexes.length; dimensionIndex++) {
            ensureCapacity(indexes[dimensionIndex], currentValues);
            if (dimensionIndex == indexes.length - 1) { // last dimension
                currentValues.set((int)indexes[dimensionIndex], value);
            }
            else {
                if (currentValues.get((int)indexes[dimensionIndex]) == null)
                    currentValues.set((int)indexes[dimensionIndex], new ArrayList<>());
                currentValues = (List<Object>) currentValues.get((int)indexes[dimensionIndex]);
            }
        }
        return this;
    }

    /** Fill the given list with nulls if necessary to make sure it has a (possibly null) value at the given index */
    private void ensureCapacity(long index, List<Object> list) {
        while (list.size() <= index)
            list.add(list.size(), null);
    }

}

/** Iterates over all cells in the standard value order, reusing a single mutable LazyCell. */
private final class CellIterator implements Iterator<Cell> {

    private long count = 0;
    private final Indexes indexes = Indexes.of(dimensionSizes, dimensionSizes, size());
    private final LazyCell reusedCell = new LazyCell(indexes, Double.NaN);

    @Override
    public boolean hasNext() {
        return count < indexes.size();
    }

    @Override
    public Cell next() {
        if ( !
hasNext()) throw new NoSuchElementException("No cell at " + indexes);
        count++;
        indexes.next();
        // The returned cell is only valid until the next call to next()
        reusedCell.value = get(indexes.toSourceValueIndex());
        return reusedCell;
    }

}

/** Iterates over the cell values of this in the standard value order. */
private final class ValueIterator implements Iterator<Double> {

    private long count = 0;

    @Override
    public boolean hasNext() {
        return count < size();
    }

    @Override
    public Double next() {
        try {
            return get(count++);
        }
        catch (IllegalArgumentException e) {
            throw new NoSuchElementException("No element at position " + count);
        }
    }

}

/** Returns one SubspaceIterator per unique value combination of the given superspace dimensions. */
private final class SuperspaceIterator implements Iterator<SubspaceIterator> {

    private final Indexes superindexes;

    /** The indexes this should iterate over */
    private final List<Integer> subdimensionIndexes;

    /**
     * The sizes of the space we'll return values of, one value for each dimension of this tensor,
     * which may be equal to or smaller than the sizes of this tensor
     */
    private final DimensionSizes iterateSizes;

    private long count = 0;

    private SuperspaceIterator(Set<String> superdimensionNames, DimensionSizes iterateSizes) {
        this.iterateSizes = iterateSizes;

        // Partition the dimension indexes into superspace (outer) and subspace (inner) sets,
        // collected in reverse order since iteration order is the opposite of dimension order
        List<Integer> superdimensionIndexes = new ArrayList<>(superdimensionNames.size());
        subdimensionIndexes = new ArrayList<>(superdimensionNames.size());
        for (int i = type.dimensions().size() - 1; i >= 0; i-- ) {
            if (superdimensionNames.contains(type.dimensions().get(i).name()))
                superdimensionIndexes.add(i);
            else
                subdimensionIndexes.add(i);
        }

        superindexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, superdimensionIndexes);
    }

    @Override
    public boolean hasNext() {
        return count < superindexes.size();
    }

    @Override
    public SubspaceIterator next() {
        if ( ! hasNext()) throw new NoSuchElementException("No cell at " + superindexes);
        count++;
        superindexes.next();
        return new SubspaceIterator(subdimensionIndexes, superindexes.indexesCopy(), iterateSizes);
    }

}

/**
 * An iterator over a subspace of this tensor. This is exposed to allow clients to query the size.
 * NOTE THAT the Cell returned by next is only valid until the next() call is made.
 * This is a concession to performance due to this typically being used in inner loops.
 */
public final class SubspaceIterator implements Iterator<Tensor.Cell> {

    /**
     * This iterator will iterate over the given dimensions, in the order given
     * (the first dimension index given is incremented to exhaustion first (i.e is etc.).
     * This may be any subset of the dimensions given by address and dimensionSizes.
     */
    private final List<Integer> iterateDimensions;
    private final long[] address;
    private final DimensionSizes iterateSizes;

    private Indexes indexes;
    private long count = 0;

    /** A lazy cell for reuse */
    private final LazyCell reusedCell;

    /**
     * Creates a new subspace iterator
     *
     * @param iterateDimensions the dimensions to iterate over, given as indexes in the dimension order of the
     *                          type of the tensor this iterates over. This iterator will iterate over these
     *                          dimensions to exhaustion in the order given (the first dimension index given is
     *                          incremented to exhaustion first etc., while other dimensions will be held
     *                          at a constant position.
     *                          This may be any subset of the dimensions given by address and dimensionSizes.
     *                          This is treated as immutable.
     * @param address the address of the first cell of this subspace.
     */
    private SubspaceIterator(List<Integer> iterateDimensions, long[] address, DimensionSizes iterateSizes) {
        this.iterateDimensions = iterateDimensions;
        this.address = address;
        this.iterateSizes = iterateSizes;
        this.indexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, iterateDimensions, address);
        reusedCell = new LazyCell(indexes, Double.NaN);
    }

    /** Returns the total number of cells in this subspace */
    public long size() {
        return indexes.size();
    }

    /** Returns the address of the cell this currently points to (which may be an invalid position) */
    public TensorAddress address() { return indexes.toAddress(); }

    /** Rewind this iterator to the first element */
    public void reset() {
        this.count = 0;
        this.indexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, iterateDimensions, address);
    }

    @Override
    public boolean hasNext() {
        return count < indexes.size();
    }

    /** Returns the next cell, which is valid until next() is called again */
    @Override
    public Cell next() {
        if ( ! hasNext()) throw new NoSuchElementException("No cell at " + indexes);
        count++;
        indexes.next();
        reusedCell.value = get(indexes.toSourceValueIndex());
        return reusedCell;
    }

}

/** A Cell which does not compute its TensorAddress unless it really has to */
private final static class LazyCell extends Tensor.Cell {

    // Mutated by the owning iterator on each next()
    private double value;
    private Indexes indexes;

    private LazyCell(Indexes indexes, Double value) {
        super(null, value);
        this.indexes = indexes;
    }

    @Override
    long getDirectIndex() { return indexes.toIterationValueIndex(); }

    @Override
    public TensorAddress getKey() { return indexes.toAddress(); }

    @Override
    public Double getValue() { return value; }

    /** Returns an immutable snapshot of this cell, safe to retain after the iterator advances. */
    @Override
    public Cell detach() {
        return new Cell(getKey(), value);
    }

}

/**
 * An array of indexes into this tensor which are able to find the next index in the value order.
 * next() can be called once per element in the dimensions we iterate over. It must be called once
 * before accessing the first position.
*/
public abstract static class Indexes {

    private final DimensionSizes sourceSizes;

    private final DimensionSizes iterationSizes;

    protected final long[] indexes;

    /**
     * Create indexes from a type containing bound indexed dimensions only.
     *
     * @throws IllegalStateException if the type contains dimensions which are not bound and indexed
     */
    public static Indexes of(TensorType type) {
        return of(DimensionSizes.of(type));
    }

    public static Indexes of(TensorType type, List<String> iterateDimensionOrder) {
        return of(DimensionSizes.of(type), toIterationOrder(iterateDimensionOrder, type));
    }

    public static Indexes of(DimensionSizes sizes) {
        return of(sizes, sizes);
    }

    private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes) {
        return of(sourceSizes, iterateSizes, completeIterationOrder(iterateSizes.dimensions()));
    }

    private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long size) {
        return of(sourceSizes, iterateSizes, completeIterationOrder(iterateSizes.dimensions()), size);
    }

    private static Indexes of(DimensionSizes sizes, List<Integer> iterateDimensions) {
        return of(sizes, sizes, iterateDimensions);
    }

    private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions) {
        return of(sourceSizes, iterateSizes, iterateDimensions, computeSize(iterateSizes, iterateDimensions));
    }

    private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long size) {
        return of(sourceSizes, iterateSizes, iterateDimensions, new long[iterateSizes.dimensions()], size);
    }

    private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long[] initialIndexes) {
        return of(sourceSizes, iterateSizes, iterateDimensions, initialIndexes, computeSize(iterateSizes, iterateDimensions));
    }

    // Chooses the most specialized Indexes implementation for the given iteration
    private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long[] initialIndexes, long size) {
        if (size == 0) {
            return new EmptyIndexes(sourceSizes, iterateSizes, initialIndexes); // nothing to iterate over
        }
        else if (size == 1) {
            return new SingleValueIndexes(sourceSizes, iterateSizes, initialIndexes); // a single value
        }
        else if (iterateDimensions.size() == 1) {
            if (sourceSizes.equals(iterateSizes))
                return new EqualSizeSingleDimensionIndexes(sourceSizes, iterateDimensions.get(0), initialIndexes, size);
            else
                return new SingleDimensionIndexes(sourceSizes, iterateSizes, iterateDimensions.get(0), initialIndexes, size); // iterate over just one dimension
        }
        else {
            if (sourceSizes.equals(iterateSizes))
                return new EqualSizeMultiDimensionIndexes(sourceSizes, iterateDimensions, initialIndexes, size);
            else
                return new MultiDimensionIndexes(sourceSizes, iterateSizes, iterateDimensions, initialIndexes, size); // iterate over multiple dimensions
        }
    }

    // Translates dimension names into iteration-order dimension indexes (null means complete order)
    private static List<Integer> toIterationOrder(List<String> dimensionNames, TensorType type) {
        if (dimensionNames == null) return completeIterationOrder(type.rank());

        List<Integer> iterationDimensions = new ArrayList<>(type.rank());
        for (int i = 0; i < type.rank(); i++)
            iterationDimensions.add(type.rank() - 1 - type.indexOfDimension(dimensionNames.get(i)).get());
        return iterationDimensions;
    }

    /** Since the right dimensions binds closest, iteration order is the opposite of the tensor order */
    private static List<Integer> completeIterationOrder(int length) {
        List<Integer> iterationDimensions = new ArrayList<>(length);
        for (int i = 0; i < length; i++)
            iterationDimensions.add(length - 1 - i);
        return iterationDimensions;
    }

    private Indexes(DimensionSizes sourceSizes, DimensionSizes iterationSizes, long[] indexes) {
        this.sourceSizes = sourceSizes;
        this.iterationSizes = iterationSizes;
        this.indexes = indexes;
    }

    // The number of iteration positions: the product of the iterated dimension sizes
    private static long computeSize(DimensionSizes sizes, List<Integer> iterateDimensions) {
        long size = 1;
        for (int iterateDimension : iterateDimensions)
            size *= sizes.size(iterateDimension);
        return size;
    }

    /** Returns the address of the current position of these indexes */
    public TensorAddress toAddress() {
        return TensorAddress.of(indexes);
    }

    public long[] indexesCopy() {
        return Arrays.copyOf(indexes, indexes.length);
    }

    /** Returns the indexes of this directly (not a copy) - the returned array must not be modified */
    public long[] indexesForReading() { return indexes; }

    public long toSourceValueIndex() {
        return IndexedTensor.toValueIndex(indexes, sourceSizes);
    }

    long toIterationValueIndex() { return IndexedTensor.toValueIndex(indexes, iterationSizes); }

    DimensionSizes dimensionSizes() { return iterationSizes; }

    /** Returns an immutable list containing a copy of the indexes in this */
    public List<Long> toList() {
        ImmutableList.Builder<Long> builder = new ImmutableList.Builder<>();
        for (long index : indexes)
            builder.add(index);
        return builder.build();
    }

    @Override
    public String toString() {
        return "indexes " + Arrays.toString(indexes);
    }

    public abstract long size();

    public abstract void next();

    /** Returns whether further values are available by calling next() */
    public abstract boolean hasNext();

    /** Returns the number of dimensions in iteration order which are currently at the start position (0) */
    abstract int nextDimensionsAtStart();

    /** Returns the number of dimensions in iteration order which are currently at their end position */
    abstract int nextDimensionsAtEnd();

}

/** Indexes over a space with no values at all. */
private final static class EmptyIndexes extends Indexes {

    private EmptyIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long[] indexes) {
        super(sourceSizes, iterateSizes, indexes);
    }

    @Override
    public long size() { return 0; }

    @Override
    public void next() {}

    @Override
    public boolean hasNext() { return false; }

    @Override
    int nextDimensionsAtStart() { return 0; }

    @Override
    int nextDimensionsAtEnd() { return 0; }

}

/** Indexes over a space with exactly one value. */
private final static class SingleValueIndexes extends Indexes {

    private boolean exhausted = false;

    private SingleValueIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long[] indexes) {
        super(sourceSizes, iterateSizes, indexes);
    }

    @Override
    public long size() { return 1; }

    @Override
    public void
next() { exhausted = true; }

    @Override
    public boolean hasNext() { return ! exhausted; }

    @Override
    int nextDimensionsAtStart() { return 1; }

    @Override
    int nextDimensionsAtEnd() { return 1; }

}

/** Iterates over multiple dimensions using explicit carry arithmetic on the index array. */
private static class MultiDimensionIndexes extends Indexes {

    private final long size;

    private final List<Integer> iterateDimensions;

    private MultiDimensionIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long[] initialIndexes, long size) {
        super(sourceSizes, iterateSizes, initialIndexes);
        this.iterateDimensions = iterateDimensions;
        this.size = size;

        // Initialize to the (virtual) position just before the first cell, so the first next() lands on it
        indexes[iterateDimensions.get(0)]--;
    }

    /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */
    @Override
    public long size() {
        return size;
    }

    /**
     * Advances this to the next cell in the standard indexed tensor cell order.
     * The first call to this will put it at the first position.
     *
     * @throws RuntimeException if this is called when hasNext returns false
     */
    @Override
    public void next() {
        int iterateDimensionsIndex = 0;
        // Carry: reset each exhausted dimension to 0 and move to the next one in iteration order
        while ( indexes[iterateDimensions.get(iterateDimensionsIndex)] + 1 == dimensionSizes().size(iterateDimensions.get(iterateDimensionsIndex))) {
            indexes[iterateDimensions.get(iterateDimensionsIndex)] = 0;
            iterateDimensionsIndex++;
        }
        indexes[iterateDimensions.get(iterateDimensionsIndex)]++;
    }

    @Override
    public boolean hasNext() {
        // There is a next position if any iterated dimension is not yet at its last index
        for (int iterateDimension : iterateDimensions) {
            if (indexes[iterateDimension] + 1 < dimensionSizes().size(iterateDimension))
                return true;
        }
        return false;
    }

    @Override
    int nextDimensionsAtStart() {
        int dimension = 0;
        while (dimension < iterateDimensions.size() && indexes[iterateDimensions.get(dimension)] == 0)
            dimension++;
        return dimension;
    }

    @Override
    int nextDimensionsAtEnd() {
        int dimension = 0;
        while (dimension < iterateDimensions.size() && indexes[iterateDimensions.get(dimension)] == dimensionSizes().size(iterateDimensions.get(dimension)) - 1)
            dimension++;
        return dimension;
    }

}

/** In this case we can reuse the source index computation for the iteration index */
private final static class EqualSizeMultiDimensionIndexes extends MultiDimensionIndexes {

    private long lastComputedSourceValueIndex = -1;

    private EqualSizeMultiDimensionIndexes(DimensionSizes sizes, List<Integer> iterateDimensions, long[] initialIndexes, long size) {
        super(sizes, sizes, iterateDimensions, initialIndexes, size);
    }

    @Override
    public long toSourceValueIndex() { return lastComputedSourceValueIndex = super.toSourceValueIndex(); }

    // NOTE(review): only valid after toSourceValueIndex() has been called for the current
    // position - callers appear to always do so first; confirm before relying on this elsewhere
    @Override
    long toIterationValueIndex() { return lastComputedSourceValueIndex; }

}

/** In this case we can keep track of indexes using a step instead of using the more elaborate computation */
private final static class SingleDimensionIndexes extends Indexes {

    private final long size;

    private final int iterateDimension;

    /** Maintain this directly as an optimization for 1-d iteration */
    private long currentSourceValueIndex, currentIterationValueIndex;

    /** The iteration step in the value index space */
    private final long sourceStep, iterationStep;

    private SingleDimensionIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, int iterateDimension, long[] initialIndexes, long size) {
        super(sourceSizes, iterateSizes, initialIndexes);
        this.iterateDimension = iterateDimension;
        this.size = size;
        this.sourceStep = productOfDimensionsAfter(iterateDimension, sourceSizes);
        this.iterationStep = productOfDimensionsAfter(iterateDimension, iterateSizes);

        // Initialize to the (virtual) position just before the first cell
        indexes[iterateDimension]--;
        currentSourceValueIndex = IndexedTensor.toValueIndex(indexes, sourceSizes);
        currentIterationValueIndex = IndexedTensor.toValueIndex(indexes, iterateSizes);
    }

    /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */
    @Override
    public long size() {
        return size;
    }

    /**
     * Advances this to the next cell in the standard indexed tensor cell order.
     * The first call to this will put it at the first position.
     *
     * @throws RuntimeException if this is called when hasNext returns false
     */
    @Override
    public void next() {
        indexes[iterateDimension]++;
        currentSourceValueIndex += sourceStep;
        currentIterationValueIndex += iterationStep;
    }

    @Override
    public long toSourceValueIndex() { return currentSourceValueIndex; }

    @Override
    long toIterationValueIndex() { return currentIterationValueIndex; }

    @Override
    public boolean hasNext() {
        return indexes[iterateDimension] + 1 < size;
    }

    @Override
    int nextDimensionsAtStart() { return currentSourceValueIndex == 0 ? 1 : 0; }

    @Override
    int nextDimensionsAtEnd() { return currentSourceValueIndex == size - 1 ? 1 : 0; }

}

/** In this case we only need to keep track of one index */
private final static class EqualSizeSingleDimensionIndexes extends Indexes {

    private final long size;

    private final int iterateDimension;

    /** Maintain this directly as an optimization for 1-d iteration */
    private long currentValueIndex;

    /** The iteration step in the value index space */
    private final long step;

    private EqualSizeSingleDimensionIndexes(DimensionSizes sizes, int iterateDimension, long[] initialIndexes, long size) {
        super(sizes, sizes, initialIndexes);
        this.iterateDimension = iterateDimension;
        this.size = size;
        this.step = productOfDimensionsAfter(iterateDimension, sizes);

        // Initialize to the (virtual) position just before the first cell
        indexes[iterateDimension]--;
        currentValueIndex = IndexedTensor.toValueIndex(indexes, sizes);
    }

    /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */
    @Override
    public long size() {
        return size;
    }

    /**
     * Advances this to the next cell in the standard indexed tensor cell order.
     * The first call to this will put it at the first position.
     *
     * @throws RuntimeException if this is called when hasNext returns false
     */
    @Override
    public void next() {
        indexes[iterateDimension]++;
        currentValueIndex += step;
    }

    @Override
    public boolean hasNext() {
        return indexes[iterateDimension] + 1 < size;
    }

    @Override
    public long toSourceValueIndex() { return currentValueIndex; }

    @Override
    long toIterationValueIndex() { return currentValueIndex; }

    @Override
    int nextDimensionsAtStart() { return currentValueIndex == 0 ? 1 : 0; }

    @Override
    int nextDimensionsAtEnd() { return currentValueIndex == size - 1 ? 1 : 0; }

}
/**
 * A dense tensor: cells are stored in a flat value array in the <i>standard value order</i>
 * (the rightmost dimension varies fastest).
 * NOTE(review): this class declares abstract methods; the enclosing declaration is presumably
 * {@code abstract} in the original source - confirm.
 */
class IndexedTensor implements Tensor {

    /** The prescribed and possibly abstract type this is an instance of */
    private final TensorType type;

    /** The sizes of the dimensions of this in the order of the dimensions of the type */
    private final DimensionSizes dimensionSizes;

    IndexedTensor(TensorType type, DimensionSizes dimensionSizes) {
        this.type = type;
        this.dimensionSizes = dimensionSizes;
    }

    /**
     * Returns an iterator over the cells of this in the <i>standard value order</i>.
     */
    @Override
    public Iterator<Cell> cellIterator() {
        return new CellIterator();
    }

    /** Returns an iterator over all the cells in this tensor which matches the given partial address */
    public SubspaceIterator cellIterator(PartialAddress partialAddress, DimensionSizes iterationSizes) {
        long[] startAddress = new long[type().dimensions().size()];
        List<Integer> iterateDimensions = new ArrayList<>();
        // Dimensions bound by the partial address are held fixed; the rest are iterated over
        for (int i = 0; i < type().dimensions().size(); i++) {
            long partialAddressLabel = partialAddress.numericLabel(type.dimensions().get(i).name());
            if (partialAddressLabel >= 0) // a negative label means the dimension is not in the partial address
                startAddress[i] = partialAddressLabel;
            else
                iterateDimensions.add(i);
        }
        return new SubspaceIterator(iterateDimensions, startAddress, iterationSizes);
    }

    /** Returns an iterator over the values of this returned in the <i>standard value order</i> */
    @Override
    public Iterator<Double> valueIterator() {
        return new ValueIterator();
    }

    /**
     * Returns an iterator over value iterators where the outer iterator is over each unique value of the dimensions
     * given and the inner iterator is over each unique value of the rest of the dimensions, in the
     * <i>standard value order</i>
     *
     * @param dimensions the names of the dimensions of the superspace
     * @param sizes the size of each dimension in the space we are returning values for, containing
     *              one value per dimension of this tensor (in order). Each size may be the same or smaller
     *              than the corresponding size of this tensor
     */
    public Iterator<SubspaceIterator> subspaceIterator(Set<String> dimensions, DimensionSizes sizes) {
        return new SuperspaceIterator(dimensions, sizes);
    }

    /** Returns a subspace iterator having the sizes of the dimensions of this tensor */
    public Iterator<SubspaceIterator> subspaceIterator(Set<String> dimensions) {
        return subspaceIterator(dimensions, dimensionSizes);
    }

    /**
     * Returns the value at the given indexes as a double
     *
     * @param indexes the indexes into the dimensions of this. Must be one number per dimension of this
     * @throws IllegalArgumentException if any of the indexes are out of bound or a wrong number of indexes are given
     */
    public double get(long ... indexes) {
        return get((int)toValueIndex(indexes, dimensionSizes));
    }

    /**
     * Returns the value at the given indexes as a float
     *
     * @param indexes the indexes into the dimensions of this. Must be one number per dimension of this
     * @throws IllegalArgumentException if any of the indexes are out of bound or a wrong number of indexes are given
     */
    public float getFloat(long ... indexes) {
        return getFloat((int)toValueIndex(indexes, dimensionSizes));
    }

    /** Returns the value at this address, or NaN if there is no value at this address */
    @Override
    public double get(TensorAddress address) {
        // address is not indexed, because numeric labels are passed as-is
        try {
            return get((int)toValueIndex(address, dimensionSizes, type));
        }
        catch (IllegalArgumentException e) {
            return Double.NaN;
        }
    }

    /**
     * Returns the value at the given <i>standard value order</i> index as a double.
     *
     * @param valueIndex the direct index into the underlying data.
     * @throws IllegalArgumentException if index is out of bounds
     */
    public abstract double get(long valueIndex);

    /**
     * Returns the value at the given <i>standard value order</i> index as a float.
     *
     * @param valueIndex the direct index into the underlying data.
     * @throws IllegalArgumentException if index is out of bounds
     */
    public abstract float getFloat(long valueIndex);

    static long toValueIndex(long[] indexes, DimensionSizes sizes) {
        // Fast path for rank 1 (note: skips the bounds check done in the general path)
        if (indexes.length == 1) return indexes[0];
        if (indexes.length == 0) return 0;

        long valueIndex = 0;
        for (int i = 0; i < indexes.length; i++) {
            if (indexes[i] >= sizes.size(i))
                throw new IllegalArgumentException(Arrays.toString(indexes) + " are not within bounds");
            valueIndex += productOfDimensionsAfter(i, sizes) * indexes[i];
        }
        return valueIndex;
    }

    static long toValueIndex(TensorAddress address, DimensionSizes sizes, TensorType type) {
        if (address.isEmpty()) return 0;

        long valueIndex = 0;
        for (int i = 0; i < address.size(); i++) {
            if (address.numericLabel(i) >= sizes.size(i))
                throw new IllegalArgumentException(address + " is not within the bounds of " + type);
            valueIndex += productOfDimensionsAfter(i, sizes) * address.numericLabel(i);
        }
        return valueIndex;
    }

    // The stride of dimension afterIndex in the standard value order
    private static long productOfDimensionsAfter(int afterIndex, DimensionSizes sizes) {
        long product = 1;
        for (int i = afterIndex + 1; i < sizes.dimensions(); i++)
            product *= sizes.size(i);
        return product;
    }

    void throwOnIncompatibleType(TensorType type) {
        if ( ! this.type().isRenamableTo(type))
            throw new IllegalArgumentException("Can not change type from " + this.type() + " to " + type +
                                               ": Types are not compatible");
    }

    @Override
    public TensorType type() { return type; }

    @Override
    public abstract IndexedTensor withType(TensorType type);

    public DimensionSizes dimensionSizes() { return dimensionSizes; }

    @Override
    public Map<TensorAddress, Double> cells() {
        if (dimensionSizes.dimensions() == 0)
            return Collections.singletonMap(TensorAddress.of(), get(0));

        // Materializes every cell; the address iteration follows the standard value order
        ImmutableMap.Builder<TensorAddress, Double> builder = new ImmutableMap.Builder<>();
        Indexes indexes = Indexes.of(dimensionSizes, dimensionSizes, size());
        for (long i = 0; i < size(); i++) {
            indexes.next();
            builder.put(indexes.toAddress(), get(i));
        }
        return builder.build();
    }

    @Override
    public Tensor remove(Set<TensorAddress> addresses) {
        // Cells of a dense tensor cannot be removed
        throw new IllegalArgumentException("Remove is not supported for indexed tensors");
    }

    @Override
    public String toString() {
        // Fall back to the standard (sparse) form for rank 0 or unbound dimensions
        if (type.rank() == 0) return Tensor.toStandardString(this);
        if (type.dimensions().stream().anyMatch(d -> d.size().isEmpty())) return Tensor.toStandardString(this);

        Indexes indexes = Indexes.of(dimensionSizes);
        StringBuilder b = new StringBuilder(type.toString()).append(":");
        indexedBlockToString(this, indexes, b);
        return b.toString();
    }

    @Override
    public boolean equals(Object other) {
        if ( ! ( other instanceof Tensor)) return false;
        return Tensor.equals(this, ((Tensor)other));
    }

    public abstract static class Builder implements Tensor.Builder {

        final TensorType type;

        private Builder(TensorType type) {
            this.type = type;
        }

        public static Builder of(TensorType type) {
            // A bound builder can only be used when all dimension sizes are known from the type
            if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
                return of(type, BoundBuilder.dimensionSizesOf(type));
            else
                return new UnboundBuilder(type);
        }

        /**
         * Creates a builder initialized with the given values
         *
         * @param type the type of the tensor to build
         * @param values the initial values of the tensor.
This <b>transfers ownership</b> of the value array - it
         *               must not be further mutated by the caller
         */
        public static Builder of(TensorType type, float[] values) {
            if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
                return of(type, BoundBuilder.dimensionSizesOf(type), values);
            else
                // NOTE(review): the given values are silently ignored for unbound types - confirm this is intended
                return new UnboundBuilder(type);
        }

        /**
         * Creates a builder initialized with the given values
         *
         * @param type the type of the tensor to build
         * @param values the initial values of the tensor. This <b>transfers ownership</b> of the value array - it
         *               must not be further mutated by the caller
         */
        public static Builder of(TensorType type, double[] values) {
            if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
                return of(type, BoundBuilder.dimensionSizesOf(type), values);
            else
                // NOTE(review): the given values are silently ignored for unbound types - confirm this is intended
                return new UnboundBuilder(type);
        }

        /**
         * Create a builder with dimension size information for this instance. Must be one size entry per dimension,
         * and, agree with the type size information when specified in the type.
         * If sizes are completely specified in the type this size information is redundant.
         */
        public static Builder of(TensorType type, DimensionSizes sizes) {
            validate(type, sizes);

            // BFLOAT16 and INT8 are stored in float-backed tensors
            switch (type.valueType()) {
                case DOUBLE: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
                case FLOAT: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes);
                case BFLOAT16: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes);
                case INT8: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes);
                default: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
            }
        }

        /**
         * Creates a builder initialized with the given values
         *
         * @param type the type of the tensor to build
         * @param values the initial values of the tensor in the <i>standard value order</i>.
         *               This <b>transfers ownership</b> of the value array - it
         *               must not be further mutated by the caller
         */
        public static Builder of(TensorType type, DimensionSizes sizes, float[] values) {
            validate(type, sizes);
            validateSizes(sizes, values.length);

            // Float-backed builders can take ownership of the float array; DOUBLE copies via fill()
            switch (type.valueType()) {
                case DOUBLE: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes).fill(values);
                case FLOAT: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes, values);
                case BFLOAT16: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes, values);
                case INT8: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes, values);
                default: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes).fill(values);
            }
        }

        /**
         * Creates a builder initialized with the given values
         *
         * @param type the type of the tensor to build
         * @param values the initial values of the tensor in the <i>standard value order</i>.
         *               This <b>transfers ownership</b> of the value array - it
         *               must not be further mutated by the caller
         */
        public static Builder of(TensorType type, DimensionSizes sizes, double[] values) {
            validate(type, sizes);
            validateSizes(sizes, values.length);

            // The DOUBLE builder can take ownership of the double array; other cell types copy via fill()
            switch (type.valueType()) {
                case DOUBLE: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes, values);
                case FLOAT: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values);
                case BFLOAT16: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values);
                case INT8: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values);
                default: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes, values);
            }
        }

        // Throws unless the supplied value array length matches the total size given by sizes
        private static void validateSizes(DimensionSizes sizes, int length) {
            if (sizes.totalSize() != length) {
                throw new IllegalArgumentException("Invalid size(" + length + ") of supplied value vector."
                                                   + " Type specifies that size should be " + sizes.totalSize());
            }
        }

        // Throws unless sizes has one entry per dimension of type, none larger than the type's bound
        private static void validate(TensorType type, DimensionSizes sizes) {
            if (sizes.dimensions() != type.dimensions().size())
                throw new IllegalArgumentException(sizes.dimensions() + " is the wrong number of dimensions " +
                                                   "for " + type);
            for (int i = 0; i < sizes.dimensions(); i++ ) {
                Optional<Long> size = type.dimensions().get(i).size();
                if (size.isPresent() && size.get() < sizes.size(i))
                    throw new IllegalArgumentException("Size of dimension " + type.dimensions().get(i).name() + " is " +
                                                       sizes.size(i) +
                                                       " but cannot be larger than " + size.get() + " in " + type);
            }
        }

        public abstract Builder cell(double value, long ... indexes);
        public abstract Builder cell(float value, long ... indexes);

        @Override
        public TensorType type() { return type; }

        @Override
        public abstract IndexedTensor build();

    }

    /** Builder operations which can set cells directly by their <i>standard value order</i> index. */
    public interface DirectIndexBuilder {

        TensorType type();

        /** Sets a value by its <i>standard value order</i> index */
        void cellByDirectIndex(long index, double value);

        /** Sets a value by its <i>standard value order</i> index */
        void cellByDirectIndex(long index, float value);

    }

    /** A bound builder can create the double array directly */
    public static abstract class BoundBuilder extends Builder implements DirectIndexBuilder {

        // One concrete size per dimension of the type, fixed at construction
        private DimensionSizes sizes;

        // Derives the dimension sizes from a type whose dimensions are all bound
        private static DimensionSizes dimensionSizesOf(TensorType type) {
            DimensionSizes.Builder b = new DimensionSizes.Builder(type.dimensions().size());
            for (int i = 0; i < type.dimensions().size(); i++)
                b.set(i, type.dimensions().get(i).size().get());
            return b.build();
        }

        BoundBuilder(TensorType type, DimensionSizes sizes) {
            super(type);
            if ( sizes.dimensions() != type.dimensions().size())
                throw new IllegalArgumentException("Must have a dimension size entry for each dimension in " + type);
            this.sizes = sizes;
        }

        // Copies the given values into this, in the standard value order
        BoundBuilder fill(float[] values) {
            long index = 0;
            for (float value : values) {
                cellByDirectIndex(index++, value);
            }
            return this;
        }

        // Copies the given values into this, in the standard value order
        BoundBuilder fill(double[] values) {
long index = 0; for (double value : values) { cellByDirectIndex(index++, value); } return this; } DimensionSizes sizes() { return sizes; } } /** * A builder used when we don't know the size of the dimensions up front. * All values is all dimensions must be specified. */ private static class UnboundBuilder extends Builder { /** List of List or Double */ private List<Object> firstDimension = null; private UnboundBuilder(TensorType type) { super(type); } @Override public IndexedTensor build() { if (firstDimension == null) throw new IllegalArgumentException("Tensor of type " + type() + " has no values"); if (type.dimensions().isEmpty()) return new IndexedDoubleTensor(type, new DimensionSizes.Builder(type.dimensions().size()).build(), new double[] {(Double) firstDimension.get(0) }); DimensionSizes dimensionSizes = findDimensionSizes(firstDimension); double[] values = new double[(int)dimensionSizes.totalSize()]; fillValues(0, 0, firstDimension, dimensionSizes, values); return new IndexedDoubleTensor(type, dimensionSizes, values); } private DimensionSizes findDimensionSizes(List<Object> firstDimension) { List<Long> dimensionSizeList = new ArrayList<>(type.dimensions().size()); findDimensionSizes(0, dimensionSizeList, firstDimension); DimensionSizes.Builder b = new DimensionSizes.Builder(type.dimensions().size()); for (int i = 0; i < b.dimensions(); i++) { if (i < dimensionSizeList.size()) b.set(i, dimensionSizeList.get(i)); } return b.build(); } @SuppressWarnings("unchecked") private void findDimensionSizes(int currentDimensionIndex, List<Long> dimensionSizes, List<Object> currentDimension) { if (currentDimensionIndex == dimensionSizes.size()) dimensionSizes.add((long)currentDimension.size()); else if (dimensionSizes.get(currentDimensionIndex) != currentDimension.size()) throw new IllegalArgumentException("Missing values in dimension " + type.dimensions().get(currentDimensionIndex) + " in " + type); for (Object value : currentDimension) if (value instanceof List) 
findDimensionSizes(currentDimensionIndex + 1, dimensionSizes, (List<Object>)value); } @SuppressWarnings("unchecked") private void fillValues(int currentDimensionIndex, long offset, List<Object> currentDimension, DimensionSizes sizes, double[] values) { if (currentDimensionIndex < sizes.dimensions() - 1) { for (long i = 0; i < currentDimension.size(); i++) fillValues(currentDimensionIndex + 1, offset + productOfDimensionsAfter(currentDimensionIndex, sizes) * i, (List<Object>) currentDimension.get((int)i), sizes, values); } else { for (long i = 0; i < currentDimension.size(); i++) { values[(int)(offset + i)] = nullAsZero((Double)currentDimension.get((int)i)); } } } private double nullAsZero(Double value) { if (value == null) return 0; return value; } @Override public CellBuilder cell() { return new CellBuilder(type, this); } @Override public Builder cell(TensorAddress address, float value) { return cell(address, (double)value); } @Override public Builder cell(TensorAddress address, double value) { long[] indexes = new long[address.size()]; for (int i = 0; i < address.size(); i++) { indexes[i] = address.numericLabel(i); } cell(value, indexes); return this; } @Override public Builder cell(float value, long... indexes) { return cell((double)value, indexes); } /** * Set a value using an index API. The number of indexes must be the same as the dimensions in the type of this. * Values can be written in any order but all values needed to make this dense must be provided * before building this. * * @return this for chaining */ @SuppressWarnings("unchecked") @Override public Builder cell(double value, long... 
indexes) { if (indexes.length != type.dimensions().size()) throw new IllegalArgumentException("Wrong number of indexes (" + indexes.length + ") for " + type); if (indexes.length == 0) { firstDimension = Collections.singletonList(value); return this; } if (firstDimension == null) firstDimension = new ArrayList<>(); List<Object> currentValues = firstDimension; for (int dimensionIndex = 0; dimensionIndex < indexes.length; dimensionIndex++) { ensureCapacity(indexes[dimensionIndex], currentValues); if (dimensionIndex == indexes.length - 1) { currentValues.set((int)indexes[dimensionIndex], value); } else { if (currentValues.get((int)indexes[dimensionIndex]) == null) currentValues.set((int)indexes[dimensionIndex], new ArrayList<>()); currentValues = (List<Object>) currentValues.get((int)indexes[dimensionIndex]); } } return this; } /** Fill the given list with nulls if necessary to make sure it has a (possibly null) value at the given index */ private void ensureCapacity(long index, List<Object> list) { while (list.size() <= index) list.add(list.size(), null); } } private final class CellIterator implements Iterator<Cell> { private long count = 0; private final Indexes indexes = Indexes.of(dimensionSizes, dimensionSizes, size()); private final LazyCell reusedCell = new LazyCell(indexes, Double.NaN); @Override public boolean hasNext() { return count < indexes.size(); } @Override public Cell next() { if ( ! 
hasNext()) throw new NoSuchElementException("No cell at " + indexes); count++; indexes.next(); reusedCell.value = get(indexes.toSourceValueIndex()); return reusedCell; } } private final class ValueIterator implements Iterator<Double> { private long count = 0; @Override public boolean hasNext() { return count < size(); } @Override public Double next() { try { return get(count++); } catch (IllegalArgumentException e) { throw new NoSuchElementException("No element at position " + count); } } } private final class SuperspaceIterator implements Iterator<SubspaceIterator> { private final Indexes superindexes; /** The indexes this should iterate over */ private final List<Integer> subdimensionIndexes; /** * The sizes of the space we'll return values of, one value for each dimension of this tensor, * which may be equal to or smaller than the sizes of this tensor */ private final DimensionSizes iterateSizes; private long count = 0; private SuperspaceIterator(Set<String> superdimensionNames, DimensionSizes iterateSizes) { this.iterateSizes = iterateSizes; List<Integer> superdimensionIndexes = new ArrayList<>(superdimensionNames.size()); subdimensionIndexes = new ArrayList<>(superdimensionNames.size()); for (int i = type.dimensions().size() - 1; i >= 0; i-- ) { if (superdimensionNames.contains(type.dimensions().get(i).name())) superdimensionIndexes.add(i); else subdimensionIndexes.add(i); } superindexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, superdimensionIndexes); } @Override public boolean hasNext() { return count < superindexes.size(); } @Override public SubspaceIterator next() { if ( ! hasNext()) throw new NoSuchElementException("No cell at " + superindexes); count++; superindexes.next(); return new SubspaceIterator(subdimensionIndexes, superindexes.indexesCopy(), iterateSizes); } } /** * An iterator over a subspace of this tensor. This is exposed to allow clients to query the size. 
* NOTE THAT the Cell returned by next is only valid until the next() call is made. * This is a concession to performance due to this typically being used in inner loops. */ public final class SubspaceIterator implements Iterator<Tensor.Cell> { /** * This iterator will iterate over the given dimensions, in the order given * (the first dimension index given is incremented to exhaustion first (i.e is etc.). * This may be any subset of the dimensions given by address and dimensionSizes. */ private final List<Integer> iterateDimensions; private final long[] address; private final DimensionSizes iterateSizes; private Indexes indexes; private long count = 0; /** A lazy cell for reuse */ private final LazyCell reusedCell; /** * Creates a new subspace iterator * * @param iterateDimensions the dimensions to iterate over, given as indexes in the dimension order of the * type of the tensor this iterates over. This iterator will iterate over these * dimensions to exhaustion in the order given (the first dimension index given is * incremented to exhaustion first etc., while other dimensions will be held * at a constant position. * This may be any subset of the dimensions given by address and dimensionSizes. * This is treated as immutable. * @param address the address of the first cell of this subspace. 
*/ private SubspaceIterator(List<Integer> iterateDimensions, long[] address, DimensionSizes iterateSizes) { this.iterateDimensions = iterateDimensions; this.address = address; this.iterateSizes = iterateSizes; this.indexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, iterateDimensions, address); reusedCell = new LazyCell(indexes, Double.NaN); } /** Returns the total number of cells in this subspace */ public long size() { return indexes.size(); } /** Returns the address of the cell this currently points to (which may be an invalid position) */ public TensorAddress address() { return indexes.toAddress(); } /** Rewind this iterator to the first element */ public void reset() { this.count = 0; this.indexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, iterateDimensions, address); } @Override public boolean hasNext() { return count < indexes.size(); } /** Returns the next cell, which is valid until next() is called again */ @Override public Cell next() { if ( ! hasNext()) throw new NoSuchElementException("No cell at " + indexes); count++; indexes.next(); reusedCell.value = get(indexes.toSourceValueIndex()); return reusedCell; } } /** A Cell which does not compute its TensorAddress unless it really has to */ private final static class LazyCell extends Tensor.Cell { private double value; private Indexes indexes; private LazyCell(Indexes indexes, Double value) { super(null, value); this.indexes = indexes; } @Override long getDirectIndex() { return indexes.toIterationValueIndex(); } @Override public TensorAddress getKey() { return indexes.toAddress(); } @Override public Double getValue() { return value; } @Override public Cell detach() { return new Cell(getKey(), value); } } /** * An array of indexes into this tensor which are able to find the next index in the value order. * next() can be called once per element in the dimensions we iterate over. It must be called once * before accessing the first position. 
*/ public abstract static class Indexes { private final DimensionSizes sourceSizes; private final DimensionSizes iterationSizes; protected final long[] indexes; /** * Create indexes from a type containing bound indexed dimensions only. * * @throws IllegalStateException if the type contains dimensions which are not bound and indexed */ public static Indexes of(TensorType type) { return of(DimensionSizes.of(type)); } public static Indexes of(TensorType type, List<String> iterateDimensionOrder) { return of(DimensionSizes.of(type), toIterationOrder(iterateDimensionOrder, type)); } public static Indexes of(DimensionSizes sizes) { return of(sizes, sizes); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes) { return of(sourceSizes, iterateSizes, completeIterationOrder(iterateSizes.dimensions())); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long size) { return of(sourceSizes, iterateSizes, completeIterationOrder(iterateSizes.dimensions()), size); } private static Indexes of(DimensionSizes sizes, List<Integer> iterateDimensions) { return of(sizes, sizes, iterateDimensions); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions) { return of(sourceSizes, iterateSizes, iterateDimensions, computeSize(iterateSizes, iterateDimensions)); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long size) { return of(sourceSizes, iterateSizes, iterateDimensions, new long[iterateSizes.dimensions()], size); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long[] initialIndexes) { return of(sourceSizes, iterateSizes, iterateDimensions, initialIndexes, computeSize(iterateSizes, iterateDimensions)); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long[] 
initialIndexes, long size) { if (size == 0) { return new EmptyIndexes(sourceSizes, iterateSizes, initialIndexes); } else if (size == 1) { return new SingleValueIndexes(sourceSizes, iterateSizes, initialIndexes); } else if (iterateDimensions.size() == 1) { if (sourceSizes.equals(iterateSizes)) return new EqualSizeSingleDimensionIndexes(sourceSizes, iterateDimensions.get(0), initialIndexes, size); else return new SingleDimensionIndexes(sourceSizes, iterateSizes, iterateDimensions.get(0), initialIndexes, size); } else { if (sourceSizes.equals(iterateSizes)) return new EqualSizeMultiDimensionIndexes(sourceSizes, iterateDimensions, initialIndexes, size); else return new MultiDimensionIndexes(sourceSizes, iterateSizes, iterateDimensions, initialIndexes, size); } } private static List<Integer> toIterationOrder(List<String> dimensionNames, TensorType type) { if (dimensionNames == null) return completeIterationOrder(type.rank()); List<Integer> iterationDimensions = new ArrayList<>(type.rank()); for (int i = 0; i < type.rank(); i++) iterationDimensions.add(type.rank() - 1 - type.indexOfDimension(dimensionNames.get(i)).get()); return iterationDimensions; } /** Since the right dimensions binds closest, iteration order is the opposite of the tensor order */ private static List<Integer> completeIterationOrder(int length) { List<Integer> iterationDimensions = new ArrayList<>(length); for (int i = 0; i < length; i++) iterationDimensions.add(length - 1 - i); return iterationDimensions; } private Indexes(DimensionSizes sourceSizes, DimensionSizes iterationSizes, long[] indexes) { this.sourceSizes = sourceSizes; this.iterationSizes = iterationSizes; this.indexes = indexes; } private static long computeSize(DimensionSizes sizes, List<Integer> iterateDimensions) { long size = 1; for (int iterateDimension : iterateDimensions) size *= sizes.size(iterateDimension); return size; } /** Returns the address of the current position of these indexes */ public TensorAddress toAddress() { return 
TensorAddress.of(indexes); } public long[] indexesCopy() { return Arrays.copyOf(indexes, indexes.length); } /** Returns a copy of the indexes of this which must not be modified */ public long[] indexesForReading() { return indexes; } public long toSourceValueIndex() { return IndexedTensor.toValueIndex(indexes, sourceSizes); } long toIterationValueIndex() { return IndexedTensor.toValueIndex(indexes, iterationSizes); } DimensionSizes dimensionSizes() { return iterationSizes; } /** Returns an immutable list containing a copy of the indexes in this */ public List<Long> toList() { ImmutableList.Builder<Long> builder = new ImmutableList.Builder<>(); for (long index : indexes) builder.add(index); return builder.build(); } @Override public String toString() { return "indexes " + Arrays.toString(indexes); } public abstract long size(); public abstract void next(); /** Returns whether further values are available by calling next() */ public abstract boolean hasNext(); /** Returns the number of dimensions in iteration order which are currently at the start position (0) */ abstract int nextDimensionsAtStart(); /** Returns the number of dimensions in iteration order which are currently at their end position */ abstract int nextDimensionsAtEnd(); } private final static class EmptyIndexes extends Indexes { private EmptyIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long[] indexes) { super(sourceSizes, iterateSizes, indexes); } @Override public long size() { return 0; } @Override public void next() {} @Override public boolean hasNext() { return false; } @Override int nextDimensionsAtStart() { return 0; } @Override int nextDimensionsAtEnd() { return 0; } } private final static class SingleValueIndexes extends Indexes { private boolean exhausted = false; private SingleValueIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long[] indexes) { super(sourceSizes, iterateSizes, indexes); } @Override public long size() { return 1; } @Override public void 
next() { exhausted = true; } @Override public boolean hasNext() { return ! exhausted; } @Override int nextDimensionsAtStart() { return 1; } @Override int nextDimensionsAtEnd() { return 1; } } private static class MultiDimensionIndexes extends Indexes { private final long size; private final List<Integer> iterateDimensions; private MultiDimensionIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long[] initialIndexes, long size) { super(sourceSizes, iterateSizes, initialIndexes); this.iterateDimensions = iterateDimensions; this.size = size; indexes[iterateDimensions.get(0)]--; } /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */ @Override public long size() { return size; } /** * Advances this to the next cell in the standard indexed tensor cell order. * The first call to this will put it at the first position. * * @throws RuntimeException if this is called when hasNext returns false */ @Override public void next() { int iterateDimensionsIndex = 0; while ( indexes[iterateDimensions.get(iterateDimensionsIndex)] + 1 == dimensionSizes().size(iterateDimensions.get(iterateDimensionsIndex))) { indexes[iterateDimensions.get(iterateDimensionsIndex)] = 0; iterateDimensionsIndex++; } indexes[iterateDimensions.get(iterateDimensionsIndex)]++; } @Override public boolean hasNext() { for (int iterateDimension : iterateDimensions) { if (indexes[iterateDimension] + 1 < dimensionSizes().size(iterateDimension)) return true; } return false; } @Override int nextDimensionsAtStart() { int dimension = 0; while (dimension < iterateDimensions.size() && indexes[iterateDimensions.get(dimension)] == 0) dimension++; return dimension; } @Override int nextDimensionsAtEnd() { int dimension = 0; while (dimension < iterateDimensions.size() && indexes[iterateDimensions.get(dimension)] == dimensionSizes().size(iterateDimensions.get(dimension)) - 1) dimension++; return dimension; } } /** In this 
case we can reuse the source index computation for the iteration index */ private final static class EqualSizeMultiDimensionIndexes extends MultiDimensionIndexes { private long lastComputedSourceValueIndex = -1; private EqualSizeMultiDimensionIndexes(DimensionSizes sizes, List<Integer> iterateDimensions, long[] initialIndexes, long size) { super(sizes, sizes, iterateDimensions, initialIndexes, size); } @Override public long toSourceValueIndex() { return lastComputedSourceValueIndex = super.toSourceValueIndex(); } @Override long toIterationValueIndex() { return lastComputedSourceValueIndex; } } /** In this case we can keep track of indexes using a step instead of using the more elaborate computation */ private final static class SingleDimensionIndexes extends Indexes { private final long size; private final int iterateDimension; /** Maintain this directly as an optimization for 1-d iteration */ private long currentSourceValueIndex, currentIterationValueIndex; /** The iteration step in the value index space */ private final long sourceStep, iterationStep; private SingleDimensionIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, int iterateDimension, long[] initialIndexes, long size) { super(sourceSizes, iterateSizes, initialIndexes); this.iterateDimension = iterateDimension; this.size = size; this.sourceStep = productOfDimensionsAfter(iterateDimension, sourceSizes); this.iterationStep = productOfDimensionsAfter(iterateDimension, iterateSizes); indexes[iterateDimension]--; currentSourceValueIndex = IndexedTensor.toValueIndex(indexes, sourceSizes); currentIterationValueIndex = IndexedTensor.toValueIndex(indexes, iterateSizes); } /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */ @Override public long size() { return size; } /** * Advances this to the next cell in the standard indexed tensor cell order. * The first call to this will put it at the first position. 
* * @throws RuntimeException if this is called when hasNext returns false */ @Override public void next() { indexes[iterateDimension]++; currentSourceValueIndex += sourceStep; currentIterationValueIndex += iterationStep; } @Override public long toSourceValueIndex() { return currentSourceValueIndex; } @Override long toIterationValueIndex() { return currentIterationValueIndex; } @Override public boolean hasNext() { return indexes[iterateDimension] + 1 < size; } @Override int nextDimensionsAtStart() { return currentSourceValueIndex == 0 ? 1 : 0; } @Override int nextDimensionsAtEnd() { return currentSourceValueIndex == size - 1 ? 1 : 0; } } /** In this case we only need to keep track of one index */ private final static class EqualSizeSingleDimensionIndexes extends Indexes { private final long size; private final int iterateDimension; /** Maintain this directly as an optimization for 1-d iteration */ private long currentValueIndex; /** The iteration step in the value index space */ private final long step; private EqualSizeSingleDimensionIndexes(DimensionSizes sizes, int iterateDimension, long[] initialIndexes, long size) { super(sizes, sizes, initialIndexes); this.iterateDimension = iterateDimension; this.size = size; this.step = productOfDimensionsAfter(iterateDimension, sizes); indexes[iterateDimension]--; currentValueIndex = IndexedTensor.toValueIndex(indexes, sizes); } /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */ @Override public long size() { return size; } /** * Advances this to the next cell in the standard indexed tensor cell order. * The first call to this will put it at the first position. 
* * @throws RuntimeException if this is called when hasNext returns false */ @Override public void next() { indexes[iterateDimension]++; currentValueIndex += step; } @Override public boolean hasNext() { return indexes[iterateDimension] + 1 < size; } @Override public long toSourceValueIndex() { return currentValueIndex; } @Override long toIterationValueIndex() { return currentValueIndex; } @Override int nextDimensionsAtStart() { return currentValueIndex == 0 ? 1 : 0; } @Override int nextDimensionsAtEnd() { return currentValueIndex == size - 1 ? 1 : 0; } } }
While this looks like it will promote int8 to float, it won't work correctly in many cases, and it is no longer doing "largestOf" at all. It is probably best to make this method actually compute "largestOf", and handle any promotion concerns separately when resolving the result types of the various operations.
/**
 * Returns the widest of the two given cell value types, in the precision
 * order (widest to narrowest) DOUBLE &gt; FLOAT &gt; BFLOAT16 &gt; INT8.
 *
 * The previous implementation collapsed every non-DOUBLE combination to FLOAT
 * (including INT8/INT8 and BFLOAT16/BFLOAT16), so it did not actually return
 * the largest of the two types. That also broke isEqualOrLargerThan, which
 * relies on largestOf(this, other) == this holding for equal or wider types.
 *
 * @param value1 one cell value type
 * @param value2 the other cell value type
 * @return whichever of the two types can represent the larger set of values
 */
public static Value largestOf(Value value1, Value value2) {
    if (value1 == DOUBLE || value2 == DOUBLE) return DOUBLE;
    if (value1 == FLOAT || value2 == FLOAT) return FLOAT;
    if (value1 == BFLOAT16 || value2 == BFLOAT16) return BFLOAT16;
    return INT8; // both operands must be INT8, the narrowest type
}
if (value1 == FLOAT || value2 == FLOAT) return FLOAT;
/**
 * Returns the widest of the two given cell value types, using the precision
 * order (widest to narrowest) DOUBLE &gt; FLOAT &gt; BFLOAT16 &gt; INT8.
 *
 * @param value1 one cell value type
 * @param value2 the other cell value type
 * @return whichever of the two types can represent the larger set of values
 */
public static Value largestOf(Value value1, Value value2) {
    // Walk the precision levels from widest to narrowest: the first level
    // matched by either operand is, by construction, the widest of the two.
    for (Value candidate : new Value[] { DOUBLE, FLOAT, BFLOAT16 }) {
        if (value1 == candidate || value2 == candidate) return candidate;
    }
    return INT8; // neither operand matched a wider level, so both are INT8
}
class TensorType { static Ascii7BitMatcher labelMatcher = new Ascii7BitMatcher("-_@" + charsAndNumbers(), "_@$" + charsAndNumbers()); /** The permissible cell value types. Default is double. */ public enum Value { DOUBLE("double"), FLOAT("float"), INT8("int8"), BFLOAT16("bfloat16"); private final String id; Value(String id) { this.id = id; } public String id() { return id; } public boolean isEqualOrLargerThan(TensorType.Value other) { return this == other || largestOf(this, other) == this; } public static Value largestOf(List<Value> values) { if (values.isEmpty()) return Value.DOUBLE; Value largest = null; for (Value value : values) { if (largest == null) largest = value; else largest = largestOf(largest, value); } return largest; } @Override public String toString() { return name().toLowerCase(); } public static Value fromId(String valueTypeString) { switch (valueTypeString) { case "double" : return Value.DOUBLE; case "float" : return Value.FLOAT; case "bfloat16" : return Value.BFLOAT16; case "int8" : return Value.INT8; default : throw new IllegalArgumentException("Value type must be either 'double', 'float', " + "'bfloat16', or 'int8' but was '" + valueTypeString + "'"); } } }; /** The empty tensor type - which is the same as a double */ public static final TensorType empty = new TensorType(Value.DOUBLE, Collections.emptyList()); private final Value valueType; /** Sorted list of the dimensions of this */ private final ImmutableList<Dimension> dimensions; private final TensorType mappedSubtype; public TensorType(Value valueType, Collection<Dimension> dimensions) { this.valueType = valueType; List<Dimension> dimensionList = new ArrayList<>(dimensions); Collections.sort(dimensionList); this.dimensions = ImmutableList.copyOf(dimensionList); if (dimensionList.stream().allMatch(d -> d.isIndexed())) mappedSubtype = empty; else if (dimensionList.stream().noneMatch(d -> d.isIndexed())) mappedSubtype = this; else mappedSubtype = new TensorType(valueType, 
dimensions.stream().filter(d -> ! d.isIndexed()).collect(Collectors.toList())); } static public Value combinedValueType(TensorType ... types) { List<Value> valueTypes = new ArrayList<>(); for (TensorType type : types) { if (type.rank() > 0) { valueTypes.add(type.valueType()); } } return Value.largestOf(valueTypes); } /** * Returns a tensor type instance from a * <a href="https: * <code>tensor(dimension1, dimension2, ...)</code> * where each dimension is either * <ul> * <li><code>dimension-name[]</code> - an unbound indexed dimension * <li><code>dimension-name[int]</code> - an bound indexed dimension * <li><code>dimension-name{}</code> - a mapped dimension * </ul> * Example: <code>tensor(x[10],y[20])</code> (a matrix) */ public static TensorType fromSpec(String specString) { return TensorTypeParser.fromSpec(specString); } /** Returns the numeric type of the cell values of this */ public Value valueType() { return valueType; } /** The type representing the mapped subset of dimensions of this. 
*/ public TensorType mappedSubtype() { return mappedSubtype; } /** Returns the number of dimensions of this: dimensions().size() */ public int rank() { return dimensions.size(); } /** Returns an immutable list of the dimensions of this */ public List<Dimension> dimensions() { return dimensions; } /** Returns an immutable set of the names of the dimensions of this */ public Set<String> dimensionNames() { return dimensions.stream().map(Dimension::name).collect(Collectors.toSet()); } /** Returns the dimension with this name, or empty if not present */ public Optional<Dimension> dimension(String name) { return indexOfDimension(name).map(i -> dimensions.get(i)); } /** Returns the 0-base index of this dimension, or empty if it is not present */ public Optional<Integer> indexOfDimension(String dimension) { for (int i = 0; i < dimensions.size(); i++) if (dimensions.get(i).name().equals(dimension)) return Optional.of(i); return Optional.empty(); } /* Returns the bound of this dimension if it is present and bound in this, empty otherwise */ public Optional<Long> sizeOfDimension(String dimension) { Optional<Dimension> d = dimension(dimension); if ( ! d.isPresent()) return Optional.empty(); return d.get().size(); } /** * Returns whether this type can be assigned to the given type, * i.e if the given type is a generalization of this type. */ public boolean isAssignableTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, false, true); } /** * Returns whether this type can be converted to the given type. * This is true if this type isAssignableTo the given type or * if it is not assignable only because it has a shorter dimension length * than the given type in some shared dimension(s), as it can then be * converted to the given type by zero padding. 
*/ public boolean isConvertibleTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, true, true); } /** * Returns whether or not this type can simply be renamed to * the given type. This is the same as being assignable, but disregarding * dimension names. */ public boolean isRenamableTo(TensorType other) { return isConvertibleOrAssignableTo(other, false, false); } private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible, boolean considerName) { if ( ! generalization.valueType().isEqualOrLargerThan(this.valueType) ) return false; if (generalization.dimensions().size() != this.dimensions().size()) return false; for (int i = 0; i < generalization.dimensions().size(); i++) { Dimension thisDimension = this.dimensions().get(i); Dimension generalizationDimension = generalization.dimensions().get(i); if (thisDimension.isIndexed() != generalizationDimension.isIndexed()) return false; if (considerName && ! thisDimension.name().equals(generalizationDimension.name())) return false; if (generalizationDimension.size().isPresent()) { if ( ! thisDimension.size().isPresent()) return false; if (convertible) { if (thisDimension.size().get() > generalizationDimension.size().get()) return false; } else { if (!thisDimension.size().get().equals(generalizationDimension.size().get())) return false; } } } return true; } @Override public String toString() { return "tensor" + (valueType == Value.DOUBLE ? "" : "<" + valueType.id() + ">") + "(" + dimensions.stream().map(Dimension::toString).collect(Collectors.joining(",")) + ")"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TensorType other = (TensorType)o; if ( (this.rank() == 0) && (other.rank() == 0)) return true; if ( this.valueType != other.valueType) return false; if ( ! 
this.dimensions.equals(other.dimensions)) return false; return true; } /** Returns whether the given type has the same dimension names as this */ public boolean mathematicallyEquals(TensorType other) { if (dimensions().size() != other.dimensions().size()) return false; for (int i = 0; i < dimensions().size(); i++) if (!dimensions().get(i).name().equals(other.dimensions().get(i).name())) return false; return true; } /** * Returns the dimensionwise generalization of this and the given type, or empty if no generalization exists. * A dimensionwise generalization exists if the two tensors share the same dimensions, and each dimension * is compatible. * For example, the dimensionwise generalization of tensor(x[],y[5]) and tensor(x[5],y[]) is tensor(x[],y[]) */ public Optional<TensorType> dimensionwiseGeneralizationWith(TensorType other) { if (this.equals(other)) return Optional.of(this); if (this.dimensions.size() != other.dimensions.size()) return Optional.empty(); Builder b = new Builder(TensorType.Value.largestOf(valueType, other.valueType)); for (int i = 0; i < dimensions.size(); i++) { Dimension thisDim = this.dimensions().get(i); Dimension otherDim = other.dimensions().get(i); if ( ! thisDim.name().equals(otherDim.name())) return Optional.empty(); if (thisDim.isIndexed() && otherDim.isIndexed()) { if (thisDim.size().isPresent() && otherDim.size().isPresent()) { if ( ! thisDim.size().get().equals(otherDim.size().get())) return Optional.empty(); b.dimension(thisDim); } else if (thisDim.size().isPresent()) { b.dimension(otherDim); } else if (otherDim.size().isPresent()) { b.dimension(thisDim); } else { b.dimension(thisDim); } } else if ( ! thisDim.isIndexed() && ! otherDim.isIndexed()) { b.dimension(thisDim); } else { return Optional.empty(); } } return Optional.of(b.build()); } @Override public int hashCode() { return Objects.hash(dimensions, valueType); } /** * A tensor dimension. * Dimensions have the natural order of their names. 
*/ public static abstract class Dimension implements Comparable<Dimension> { public enum Type { indexedBound, indexedUnbound, mapped } private final String name; private Dimension(String name) { this.name = requireIdentifier(name); } public final String name() { return name; } /** Returns the size of this dimension if it is bound, empty otherwise */ public abstract Optional<Long> size(); public abstract Type type(); /** Returns a copy of this with the name set to the given name */ public abstract Dimension withName(String name); /** Returns true if this is an indexed bound or unbound type */ public boolean isIndexed() { return type() == Type.indexedBound || type() == Type.indexedUnbound; } /** Returns true if this is of the mapped type */ public boolean isMapped() { return type() == Type.mapped; } /** * Returns the dimension resulting from combining two dimensions having the same name but possibly different * types: * * [N] + [M] = [ min(N, M) ] * [N] + [] = [] * [] + {} = {} */ Dimension combineWith(Optional<Dimension> other, boolean allowDifferentSizes) { if ( ! other.isPresent()) return this; if (this instanceof MappedDimension) return this; if (other.get() instanceof MappedDimension) return other.get(); if (this instanceof IndexedUnboundDimension) return this; if (other.get() instanceof IndexedUnboundDimension) return other.get(); IndexedBoundDimension thisIb = (IndexedBoundDimension)this; IndexedBoundDimension otherIb = (IndexedBoundDimension)other.get(); if (allowDifferentSizes) return thisIb.size().get() < otherIb.size().get() ? thisIb : otherIb; if ( ! 
thisIb.size().equals(otherIb.size())) throw new IllegalArgumentException("Unequal dimension sizes in " + thisIb + " and " + otherIb); return thisIb; } @Override public abstract String toString(); @Override public boolean equals(Object other) { if (this == other) return true; if (other == null || getClass() != other.getClass()) return false; return name.equals(((Dimension)other).name); } @Override public int hashCode() { return name.hashCode(); } @Override public int compareTo(Dimension other) { return this.name.compareTo(other.name); } public static Dimension indexed(String name, long size) { return new IndexedBoundDimension(name, size); } public static Dimension indexed(String name) { return new IndexedUnboundDimension(name); } public static Dimension mapped(String name) { return new MappedDimension(name); } static private String requireIdentifier(String name) { if (name == null) throw new IllegalArgumentException("A dimension name cannot be null"); if ( ! TensorType.labelMatcher.matches(name)) throw new IllegalArgumentException("A dimension name must be an identifier or integer, not '" + name + "'"); return name; } } public static class IndexedBoundDimension extends TensorType.Dimension { private final Long size; private IndexedBoundDimension(String name, long size) { super(name); if (size < 1) throw new IllegalArgumentException("Size of bound dimension '" + name + "' must be at least 1"); if (size > Integer.MAX_VALUE) throw new IllegalArgumentException("Size of bound dimension '" + name + "' cannot be larger than " + Integer.MAX_VALUE); this.size = size; } @Override public Optional<Long> size() { return Optional.of(size); } @Override public Type type() { return Type.indexedBound; } @Override public IndexedBoundDimension withName(String name) { return new IndexedBoundDimension(name, size); } @Override public String toString() { return name() + "[" + size + "]"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() 
!= o.getClass()) return false; if (!super.equals(o)) return false; IndexedBoundDimension that = (IndexedBoundDimension) o; if (!size.equals(that.size)) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + size.hashCode(); return result; } } public static class IndexedUnboundDimension extends TensorType.Dimension { private IndexedUnboundDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.indexedUnbound; } @Override public IndexedUnboundDimension withName(String name) { return new IndexedUnboundDimension(name); } @Override public String toString() { return name() + "[]"; } } public static class MappedDimension extends TensorType.Dimension { private MappedDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.mapped; } @Override public MappedDimension withName(String name) { return new MappedDimension(name); } @Override public String toString() { return name() + "{}"; } } public static class Builder { private final Map<String, Dimension> dimensions = new LinkedHashMap<>(); private final Value valueType; /** Creates an empty builder with cells of type double */ public Builder() { this(Value.DOUBLE); } public Builder(Value valueType) { this.valueType = valueType; } /** * Creates a builder containing a combination of the dimensions of the given types * * If the same dimension is indexed with different size restrictions the smallest size will be used. * If it is size restricted in one argument but not the other it will not be size restricted. * If it is indexed in one and mapped in the other it will become mapped. * * The value type will be the largest of the value types of the input types */ public Builder(TensorType ... types) { this(true, types); } public Builder(boolean allowDifferentSizes, TensorType ... 
types) { this.valueType = TensorType.combinedValueType(types); for (TensorType type : types) addDimensionsOf(type, allowDifferentSizes); } /** Creates a builder from the given dimensions, having double as the value type */ public Builder(Iterable<Dimension> dimensions) { this(Value.DOUBLE, dimensions); } /** Creates a builder from the given value type and dimensions */ public Builder(Value valueType, Iterable<Dimension> dimensions) { this.valueType = valueType; for (TensorType.Dimension dimension : dimensions) { dimension(dimension); } } private void addDimensionsOf(TensorType type, boolean allowDifferentSizes) { for (Dimension dimension : type.dimensions) { set(dimension.combineWith(Optional.ofNullable(dimensions.get(dimension.name())), allowDifferentSizes)); } } /** Returns the current number of dimensions in this */ public int rank() { return dimensions.size(); } /** * Adds a new dimension to this * * @throws IllegalArgumentException if the dimension is already present */ private Builder add(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); if (dimensions.containsKey(dimension.name())) throw new IllegalArgumentException("Could not add dimension " + dimension + " as this dimension " + "is already present"); dimensions.put(dimension.name(), dimension); return this; } /** Adds or replaces a dimension in this */ public Builder set(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); dimensions.put(dimension.name(), dimension); return this; } /** * Adds a bound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name, long size) { return add(new IndexedBoundDimension(name, size)); } /** * Adds an unbound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name) { return add(new IndexedUnboundDimension(name)); } /** * Adds a mapped 
dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder mapped(String name) { return add(new MappedDimension(name)); } /** Adds the given dimension */ public Builder dimension(Dimension dimension) { return add(dimension); } /** Returns the given dimension, or empty if none is present */ public Optional<Dimension> getDimension(String dimension) { return Optional.ofNullable(dimensions.get(dimension)); } public Builder dimension(String name, Dimension.Type type) { switch (type) { case mapped : mapped(name); break; case indexedUnbound : indexed(name); break; default : throw new IllegalArgumentException("This can not create a dimension of type " + type); } return this; } public TensorType build() { return new TensorType(valueType, dimensions.values()); } } }
class TensorType { static Ascii7BitMatcher labelMatcher = new Ascii7BitMatcher("-_@" + charsAndNumbers(), "_@$" + charsAndNumbers()); /** The permissible cell value types. Default is double. */ public enum Value { DOUBLE("double"), FLOAT("float"), INT8("int8"), BFLOAT16("bfloat16"); private final String id; Value(String id) { this.id = id; } public String id() { return id; } public boolean isEqualOrLargerThan(TensorType.Value other) { return this == other || largestOf(this, other) == this; } public static Value largestOf(List<Value> values) { if (values.isEmpty()) return Value.DOUBLE; Value largest = null; for (Value value : values) { if (largest == null) largest = value; else largest = largestOf(largest, value); } return largest; } @Override public String toString() { return name().toLowerCase(); } public static Value fromId(String valueTypeString) { for(Value value : Value.values()) { if (value.id.equals(valueTypeString)) { return value; } } throw new IllegalArgumentException("Value type must be either 'double', 'float', " + "'bfloat16', or 'int8' but was '" + valueTypeString + "'"); } }; /** The empty tensor type - which is the same as a double */ public static final TensorType empty = new TensorType(Value.DOUBLE, Collections.emptyList()); private final Value valueType; /** Sorted list of the dimensions of this */ private final ImmutableList<Dimension> dimensions; private final TensorType mappedSubtype; public TensorType(Value valueType, Collection<Dimension> dimensions) { this.valueType = valueType; List<Dimension> dimensionList = new ArrayList<>(dimensions); Collections.sort(dimensionList); this.dimensions = ImmutableList.copyOf(dimensionList); if (dimensionList.stream().allMatch(d -> d.isIndexed())) mappedSubtype = empty; else if (dimensionList.stream().noneMatch(d -> d.isIndexed())) mappedSubtype = this; else mappedSubtype = new TensorType(valueType, dimensions.stream().filter(d -> ! 
d.isIndexed()).collect(Collectors.toList())); } static public Value combinedValueType(TensorType ... types) { List<Value> valueTypes = new ArrayList<>(); for (TensorType type : types) { if (type.rank() > 0) { valueTypes.add(type.valueType()); } } return Value.largestOf(valueTypes); } /** * Returns a tensor type instance from a * <a href="https: * <code>tensor(dimension1, dimension2, ...)</code> * where each dimension is either * <ul> * <li><code>dimension-name[]</code> - an unbound indexed dimension * <li><code>dimension-name[int]</code> - an bound indexed dimension * <li><code>dimension-name{}</code> - a mapped dimension * </ul> * Example: <code>tensor(x[10],y[20])</code> (a matrix) */ public static TensorType fromSpec(String specString) { return TensorTypeParser.fromSpec(specString); } /** Returns the numeric type of the cell values of this */ public Value valueType() { return valueType; } /** The type representing the mapped subset of dimensions of this. */ public TensorType mappedSubtype() { return mappedSubtype; } /** Returns the number of dimensions of this: dimensions().size() */ public int rank() { return dimensions.size(); } /** Returns an immutable list of the dimensions of this */ public List<Dimension> dimensions() { return dimensions; } /** Returns an immutable set of the names of the dimensions of this */ public Set<String> dimensionNames() { return dimensions.stream().map(Dimension::name).collect(Collectors.toSet()); } /** Returns the dimension with this name, or empty if not present */ public Optional<Dimension> dimension(String name) { return indexOfDimension(name).map(i -> dimensions.get(i)); } /** Returns the 0-base index of this dimension, or empty if it is not present */ public Optional<Integer> indexOfDimension(String dimension) { for (int i = 0; i < dimensions.size(); i++) if (dimensions.get(i).name().equals(dimension)) return Optional.of(i); return Optional.empty(); } /* Returns the bound of this dimension if it is present and bound in this, 
empty otherwise */ public Optional<Long> sizeOfDimension(String dimension) { Optional<Dimension> d = dimension(dimension); if ( ! d.isPresent()) return Optional.empty(); return d.get().size(); } /** * Returns whether this type can be assigned to the given type, * i.e if the given type is a generalization of this type. */ public boolean isAssignableTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, false, true); } /** * Returns whether this type can be converted to the given type. * This is true if this type isAssignableTo the given type or * if it is not assignable only because it has a shorter dimension length * than the given type in some shared dimension(s), as it can then be * converted to the given type by zero padding. */ public boolean isConvertibleTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, true, true); } /** * Returns whether or not this type can simply be renamed to * the given type. This is the same as being assignable, but disregarding * dimension names. */ public boolean isRenamableTo(TensorType other) { return isConvertibleOrAssignableTo(other, false, false); } private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible, boolean considerName) { if ( ! generalization.valueType().isEqualOrLargerThan(this.valueType) ) return false; if (generalization.dimensions().size() != this.dimensions().size()) return false; for (int i = 0; i < generalization.dimensions().size(); i++) { Dimension thisDimension = this.dimensions().get(i); Dimension generalizationDimension = generalization.dimensions().get(i); if (thisDimension.isIndexed() != generalizationDimension.isIndexed()) return false; if (considerName && ! thisDimension.name().equals(generalizationDimension.name())) return false; if (generalizationDimension.size().isPresent()) { if ( ! 
thisDimension.size().isPresent()) return false; if (convertible) { if (thisDimension.size().get() > generalizationDimension.size().get()) return false; } else { if (!thisDimension.size().get().equals(generalizationDimension.size().get())) return false; } } } return true; } @Override public String toString() { return "tensor" + (valueType == Value.DOUBLE ? "" : "<" + valueType.id() + ">") + "(" + dimensions.stream().map(Dimension::toString).collect(Collectors.joining(",")) + ")"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TensorType other = (TensorType)o; if ( (this.rank() == 0) && (other.rank() == 0)) return true; if ( this.valueType != other.valueType) return false; if ( ! this.dimensions.equals(other.dimensions)) return false; return true; } /** Returns whether the given type has the same dimension names as this */ public boolean mathematicallyEquals(TensorType other) { if (dimensions().size() != other.dimensions().size()) return false; for (int i = 0; i < dimensions().size(); i++) if (!dimensions().get(i).name().equals(other.dimensions().get(i).name())) return false; return true; } /** * Returns the dimensionwise generalization of this and the given type, or empty if no generalization exists. * A dimensionwise generalization exists if the two tensors share the same dimensions, and each dimension * is compatible. * For example, the dimensionwise generalization of tensor(x[],y[5]) and tensor(x[5],y[]) is tensor(x[],y[]) */ public Optional<TensorType> dimensionwiseGeneralizationWith(TensorType other) { if (this.equals(other)) return Optional.of(this); if (this.dimensions.size() != other.dimensions.size()) return Optional.empty(); Builder b = new Builder(TensorType.Value.largestOf(valueType, other.valueType)); for (int i = 0; i < dimensions.size(); i++) { Dimension thisDim = this.dimensions().get(i); Dimension otherDim = other.dimensions().get(i); if ( ! 
thisDim.name().equals(otherDim.name())) return Optional.empty(); if (thisDim.isIndexed() && otherDim.isIndexed()) { if (thisDim.size().isPresent() && otherDim.size().isPresent()) { if ( ! thisDim.size().get().equals(otherDim.size().get())) return Optional.empty(); b.dimension(thisDim); } else if (thisDim.size().isPresent()) { b.dimension(otherDim); } else if (otherDim.size().isPresent()) { b.dimension(thisDim); } else { b.dimension(thisDim); } } else if ( ! thisDim.isIndexed() && ! otherDim.isIndexed()) { b.dimension(thisDim); } else { return Optional.empty(); } } return Optional.of(b.build()); } @Override public int hashCode() { return Objects.hash(dimensions, valueType); } /** * A tensor dimension. * Dimensions have the natural order of their names. */ public static abstract class Dimension implements Comparable<Dimension> { public enum Type { indexedBound, indexedUnbound, mapped } private final String name; private Dimension(String name) { this.name = requireIdentifier(name); } public final String name() { return name; } /** Returns the size of this dimension if it is bound, empty otherwise */ public abstract Optional<Long> size(); public abstract Type type(); /** Returns a copy of this with the name set to the given name */ public abstract Dimension withName(String name); /** Returns true if this is an indexed bound or unbound type */ public boolean isIndexed() { return type() == Type.indexedBound || type() == Type.indexedUnbound; } /** Returns true if this is of the mapped type */ public boolean isMapped() { return type() == Type.mapped; } /** * Returns the dimension resulting from combining two dimensions having the same name but possibly different * types: * * [N] + [M] = [ min(N, M) ] * [N] + [] = [] * [] + {} = {} */ Dimension combineWith(Optional<Dimension> other, boolean allowDifferentSizes) { if ( ! 
other.isPresent()) return this; if (this instanceof MappedDimension) return this; if (other.get() instanceof MappedDimension) return other.get(); if (this instanceof IndexedUnboundDimension) return this; if (other.get() instanceof IndexedUnboundDimension) return other.get(); IndexedBoundDimension thisIb = (IndexedBoundDimension)this; IndexedBoundDimension otherIb = (IndexedBoundDimension)other.get(); if (allowDifferentSizes) return thisIb.size().get() < otherIb.size().get() ? thisIb : otherIb; if ( ! thisIb.size().equals(otherIb.size())) throw new IllegalArgumentException("Unequal dimension sizes in " + thisIb + " and " + otherIb); return thisIb; } @Override public abstract String toString(); @Override public boolean equals(Object other) { if (this == other) return true; if (other == null || getClass() != other.getClass()) return false; return name.equals(((Dimension)other).name); } @Override public int hashCode() { return name.hashCode(); } @Override public int compareTo(Dimension other) { return this.name.compareTo(other.name); } public static Dimension indexed(String name, long size) { return new IndexedBoundDimension(name, size); } public static Dimension indexed(String name) { return new IndexedUnboundDimension(name); } public static Dimension mapped(String name) { return new MappedDimension(name); } static private String requireIdentifier(String name) { if (name == null) throw new IllegalArgumentException("A dimension name cannot be null"); if ( ! 
TensorType.labelMatcher.matches(name)) throw new IllegalArgumentException("A dimension name must be an identifier or integer, not '" + name + "'"); return name; } } public static class IndexedBoundDimension extends TensorType.Dimension { private final Long size; private IndexedBoundDimension(String name, long size) { super(name); if (size < 1) throw new IllegalArgumentException("Size of bound dimension '" + name + "' must be at least 1"); if (size > Integer.MAX_VALUE) throw new IllegalArgumentException("Size of bound dimension '" + name + "' cannot be larger than " + Integer.MAX_VALUE); this.size = size; } @Override public Optional<Long> size() { return Optional.of(size); } @Override public Type type() { return Type.indexedBound; } @Override public IndexedBoundDimension withName(String name) { return new IndexedBoundDimension(name, size); } @Override public String toString() { return name() + "[" + size + "]"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; IndexedBoundDimension that = (IndexedBoundDimension) o; if (!size.equals(that.size)) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + size.hashCode(); return result; } } public static class IndexedUnboundDimension extends TensorType.Dimension { private IndexedUnboundDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.indexedUnbound; } @Override public IndexedUnboundDimension withName(String name) { return new IndexedUnboundDimension(name); } @Override public String toString() { return name() + "[]"; } } public static class MappedDimension extends TensorType.Dimension { private MappedDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return 
Type.mapped; } @Override public MappedDimension withName(String name) { return new MappedDimension(name); } @Override public String toString() { return name() + "{}"; } } public static class Builder { private final Map<String, Dimension> dimensions = new LinkedHashMap<>(); private final Value valueType; /** Creates an empty builder with cells of type double */ public Builder() { this(Value.DOUBLE); } public Builder(Value valueType) { this.valueType = valueType; } /** * Creates a builder containing a combination of the dimensions of the given types * * If the same dimension is indexed with different size restrictions the smallest size will be used. * If it is size restricted in one argument but not the other it will not be size restricted. * If it is indexed in one and mapped in the other it will become mapped. * * The value type will be the largest of the value types of the input types */ public Builder(TensorType ... types) { this(true, types); } public Builder(boolean allowDifferentSizes, TensorType ... 
types) { this.valueType = TensorType.combinedValueType(types); for (TensorType type : types) addDimensionsOf(type, allowDifferentSizes); } /** Creates a builder from the given dimensions, having double as the value type */ public Builder(Iterable<Dimension> dimensions) { this(Value.DOUBLE, dimensions); } /** Creates a builder from the given value type and dimensions */ public Builder(Value valueType, Iterable<Dimension> dimensions) { this.valueType = valueType; for (TensorType.Dimension dimension : dimensions) { dimension(dimension); } } private void addDimensionsOf(TensorType type, boolean allowDifferentSizes) { for (Dimension dimension : type.dimensions) { set(dimension.combineWith(Optional.ofNullable(dimensions.get(dimension.name())), allowDifferentSizes)); } } /** Returns the current number of dimensions in this */ public int rank() { return dimensions.size(); } /** * Adds a new dimension to this * * @throws IllegalArgumentException if the dimension is already present */ private Builder add(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); if (dimensions.containsKey(dimension.name())) throw new IllegalArgumentException("Could not add dimension " + dimension + " as this dimension " + "is already present"); dimensions.put(dimension.name(), dimension); return this; } /** Adds or replaces a dimension in this */ public Builder set(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); dimensions.put(dimension.name(), dimension); return this; } /** * Adds a bound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name, long size) { return add(new IndexedBoundDimension(name, size)); } /** * Adds an unbound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name) { return add(new IndexedUnboundDimension(name)); } /** * Adds a mapped 
dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder mapped(String name) { return add(new MappedDimension(name)); } /** Adds the given dimension */ public Builder dimension(Dimension dimension) { return add(dimension); } /** Returns the given dimension, or empty if none is present */ public Optional<Dimension> getDimension(String dimension) { return Optional.ofNullable(dimensions.get(dimension)); } public Builder dimension(String name, Dimension.Type type) { switch (type) { case mapped : mapped(name); break; case indexedUnbound : indexed(name); break; default : throw new IllegalArgumentException("This can not create a dimension of type " + type); } return this; } public TensorType build() { return new TensorType(valueType, dimensions.values()); } } }
consider looping over Value.values() and comparing with their "id" instead of repeating the names here.
/**
 * Returns the {@code Value} whose id matches the given string.
 *
 * Iterates over {@code Value.values()} and compares each constant's id rather than
 * repeating every name in a switch, so newly added value types are resolved
 * automatically without touching this method.
 *
 * @param valueTypeString the id of a cell value type, e.g. "double", "float", "bfloat16" or "int8"
 * @return the matching Value constant
 * @throws IllegalArgumentException if the string matches no value type id
 */
public static Value fromId(String valueTypeString) {
    for (Value value : Value.values()) {
        if (value.id.equals(valueTypeString))
            return value;
    }
    throw new IllegalArgumentException("Value type must be either 'double', 'float', " +
                                       "'bfloat16', or 'int8' but was '" + valueTypeString + "'");
}
case "bfloat16" : return Value.BFLOAT16;
/**
 * Resolves a value-type id string (e.g. "double", "int8") to its {@code Value} constant.
 *
 * @throws IllegalArgumentException if no value type has the given id
 */
public static Value fromId(String valueTypeString) {
    // Scan the enum constants themselves so the set of accepted ids never drifts
    // from the enum definition.
    Value[] candidates = Value.values();
    for (int i = 0; i < candidates.length; i++) {
        if (candidates[i].id.equals(valueTypeString))
            return candidates[i];
    }
    throw new IllegalArgumentException("Value type must be either 'double', 'float', " +
                                       "'bfloat16', or 'int8' but was '" + valueTypeString + "'");
}
class TensorType { static Ascii7BitMatcher labelMatcher = new Ascii7BitMatcher("-_@" + charsAndNumbers(), "_@$" + charsAndNumbers()); /** The permissible cell value types. Default is double. */ public enum Value { DOUBLE("double"), FLOAT("float"), INT8("int8"), BFLOAT16("bfloat16"); private final String id; Value(String id) { this.id = id; } public String id() { return id; } public boolean isEqualOrLargerThan(TensorType.Value other) { return this == other || largestOf(this, other) == this; } public static Value largestOf(List<Value> values) { if (values.isEmpty()) return Value.DOUBLE; Value largest = null; for (Value value : values) { if (largest == null) largest = value; else largest = largestOf(largest, value); } return largest; } public static Value largestOf(Value value1, Value value2) { if (value1 == DOUBLE || value2 == DOUBLE) return DOUBLE; if (value1 == FLOAT || value2 == FLOAT) return FLOAT; if (value1 == BFLOAT16 || value2 == BFLOAT16) return FLOAT; if (value1 == INT8 || value2 == INT8) return FLOAT; return FLOAT; } @Override public String toString() { return name().toLowerCase(); } }; /** The empty tensor type - which is the same as a double */ public static final TensorType empty = new TensorType(Value.DOUBLE, Collections.emptyList()); private final Value valueType; /** Sorted list of the dimensions of this */ private final ImmutableList<Dimension> dimensions; private final TensorType mappedSubtype; public TensorType(Value valueType, Collection<Dimension> dimensions) { this.valueType = valueType; List<Dimension> dimensionList = new ArrayList<>(dimensions); Collections.sort(dimensionList); this.dimensions = ImmutableList.copyOf(dimensionList); if (dimensionList.stream().allMatch(d -> d.isIndexed())) mappedSubtype = empty; else if (dimensionList.stream().noneMatch(d -> d.isIndexed())) mappedSubtype = this; else mappedSubtype = new TensorType(valueType, dimensions.stream().filter(d -> ! 
d.isIndexed()).collect(Collectors.toList())); } static public Value combinedValueType(TensorType ... types) { List<Value> valueTypes = new ArrayList<>(); for (TensorType type : types) { if (type.rank() > 0) { valueTypes.add(type.valueType()); } } return Value.largestOf(valueTypes); } /** * Returns a tensor type instance from a * <a href="https: * <code>tensor(dimension1, dimension2, ...)</code> * where each dimension is either * <ul> * <li><code>dimension-name[]</code> - an unbound indexed dimension * <li><code>dimension-name[int]</code> - an bound indexed dimension * <li><code>dimension-name{}</code> - a mapped dimension * </ul> * Example: <code>tensor(x[10],y[20])</code> (a matrix) */ public static TensorType fromSpec(String specString) { return TensorTypeParser.fromSpec(specString); } /** Returns the numeric type of the cell values of this */ public Value valueType() { return valueType; } /** The type representing the mapped subset of dimensions of this. */ public TensorType mappedSubtype() { return mappedSubtype; } /** Returns the number of dimensions of this: dimensions().size() */ public int rank() { return dimensions.size(); } /** Returns an immutable list of the dimensions of this */ public List<Dimension> dimensions() { return dimensions; } /** Returns an immutable set of the names of the dimensions of this */ public Set<String> dimensionNames() { return dimensions.stream().map(Dimension::name).collect(Collectors.toSet()); } /** Returns the dimension with this name, or empty if not present */ public Optional<Dimension> dimension(String name) { return indexOfDimension(name).map(i -> dimensions.get(i)); } /** Returns the 0-base index of this dimension, or empty if it is not present */ public Optional<Integer> indexOfDimension(String dimension) { for (int i = 0; i < dimensions.size(); i++) if (dimensions.get(i).name().equals(dimension)) return Optional.of(i); return Optional.empty(); } /* Returns the bound of this dimension if it is present and bound in this, 
empty otherwise */ public Optional<Long> sizeOfDimension(String dimension) { Optional<Dimension> d = dimension(dimension); if ( ! d.isPresent()) return Optional.empty(); return d.get().size(); } /** * Returns whether this type can be assigned to the given type, * i.e if the given type is a generalization of this type. */ public boolean isAssignableTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, false, true); } /** * Returns whether this type can be converted to the given type. * This is true if this type isAssignableTo the given type or * if it is not assignable only because it has a shorter dimension length * than the given type in some shared dimension(s), as it can then be * converted to the given type by zero padding. */ public boolean isConvertibleTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, true, true); } /** * Returns whether or not this type can simply be renamed to * the given type. This is the same as being assignable, but disregarding * dimension names. */ public boolean isRenamableTo(TensorType other) { return isConvertibleOrAssignableTo(other, false, false); } private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible, boolean considerName) { if ( ! generalization.valueType().isEqualOrLargerThan(this.valueType) ) return false; if (generalization.dimensions().size() != this.dimensions().size()) return false; for (int i = 0; i < generalization.dimensions().size(); i++) { Dimension thisDimension = this.dimensions().get(i); Dimension generalizationDimension = generalization.dimensions().get(i); if (thisDimension.isIndexed() != generalizationDimension.isIndexed()) return false; if (considerName && ! thisDimension.name().equals(generalizationDimension.name())) return false; if (generalizationDimension.size().isPresent()) { if ( ! 
thisDimension.size().isPresent()) return false; if (convertible) { if (thisDimension.size().get() > generalizationDimension.size().get()) return false; } else { if (!thisDimension.size().get().equals(generalizationDimension.size().get())) return false; } } } return true; } @Override public String toString() { return "tensor" + (valueType == Value.DOUBLE ? "" : "<" + valueType.id() + ">") + "(" + dimensions.stream().map(Dimension::toString).collect(Collectors.joining(",")) + ")"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TensorType other = (TensorType)o; if ( (this.rank() == 0) && (other.rank() == 0)) return true; if ( this.valueType != other.valueType) return false; if ( ! this.dimensions.equals(other.dimensions)) return false; return true; } /** Returns whether the given type has the same dimension names as this */ public boolean mathematicallyEquals(TensorType other) { if (dimensions().size() != other.dimensions().size()) return false; for (int i = 0; i < dimensions().size(); i++) if (!dimensions().get(i).name().equals(other.dimensions().get(i).name())) return false; return true; } /** * Returns the dimensionwise generalization of this and the given type, or empty if no generalization exists. * A dimensionwise generalization exists if the two tensors share the same dimensions, and each dimension * is compatible. * For example, the dimensionwise generalization of tensor(x[],y[5]) and tensor(x[5],y[]) is tensor(x[],y[]) */ public Optional<TensorType> dimensionwiseGeneralizationWith(TensorType other) { if (this.equals(other)) return Optional.of(this); if (this.dimensions.size() != other.dimensions.size()) return Optional.empty(); Builder b = new Builder(TensorType.Value.largestOf(valueType, other.valueType)); for (int i = 0; i < dimensions.size(); i++) { Dimension thisDim = this.dimensions().get(i); Dimension otherDim = other.dimensions().get(i); if ( ! 
thisDim.name().equals(otherDim.name())) return Optional.empty(); if (thisDim.isIndexed() && otherDim.isIndexed()) { if (thisDim.size().isPresent() && otherDim.size().isPresent()) { if ( ! thisDim.size().get().equals(otherDim.size().get())) return Optional.empty(); b.dimension(thisDim); } else if (thisDim.size().isPresent()) { b.dimension(otherDim); } else if (otherDim.size().isPresent()) { b.dimension(thisDim); } else { b.dimension(thisDim); } } else if ( ! thisDim.isIndexed() && ! otherDim.isIndexed()) { b.dimension(thisDim); } else { return Optional.empty(); } } return Optional.of(b.build()); } @Override public int hashCode() { return Objects.hash(dimensions, valueType); } /** * A tensor dimension. * Dimensions have the natural order of their names. */ public static abstract class Dimension implements Comparable<Dimension> { public enum Type { indexedBound, indexedUnbound, mapped } private final String name; private Dimension(String name) { this.name = requireIdentifier(name); } public final String name() { return name; } /** Returns the size of this dimension if it is bound, empty otherwise */ public abstract Optional<Long> size(); public abstract Type type(); /** Returns a copy of this with the name set to the given name */ public abstract Dimension withName(String name); /** Returns true if this is an indexed bound or unbound type */ public boolean isIndexed() { return type() == Type.indexedBound || type() == Type.indexedUnbound; } /** Returns true if this is of the mapped type */ public boolean isMapped() { return type() == Type.mapped; } /** * Returns the dimension resulting from combining two dimensions having the same name but possibly different * types: * * [N] + [M] = [ min(N, M) ] * [N] + [] = [] * [] + {} = {} */ Dimension combineWith(Optional<Dimension> other, boolean allowDifferentSizes) { if ( ! 
other.isPresent()) return this; if (this instanceof MappedDimension) return this; if (other.get() instanceof MappedDimension) return other.get(); if (this instanceof IndexedUnboundDimension) return this; if (other.get() instanceof IndexedUnboundDimension) return other.get(); IndexedBoundDimension thisIb = (IndexedBoundDimension)this; IndexedBoundDimension otherIb = (IndexedBoundDimension)other.get(); if (allowDifferentSizes) return thisIb.size().get() < otherIb.size().get() ? thisIb : otherIb; if ( ! thisIb.size().equals(otherIb.size())) throw new IllegalArgumentException("Unequal dimension sizes in " + thisIb + " and " + otherIb); return thisIb; } @Override public abstract String toString(); @Override public boolean equals(Object other) { if (this == other) return true; if (other == null || getClass() != other.getClass()) return false; return name.equals(((Dimension)other).name); } @Override public int hashCode() { return name.hashCode(); } @Override public int compareTo(Dimension other) { return this.name.compareTo(other.name); } public static Dimension indexed(String name, long size) { return new IndexedBoundDimension(name, size); } public static Dimension indexed(String name) { return new IndexedUnboundDimension(name); } public static Dimension mapped(String name) { return new MappedDimension(name); } static private String requireIdentifier(String name) { if (name == null) throw new IllegalArgumentException("A dimension name cannot be null"); if ( ! 
TensorType.labelMatcher.matches(name)) throw new IllegalArgumentException("A dimension name must be an identifier or integer, not '" + name + "'"); return name; } } public static class IndexedBoundDimension extends TensorType.Dimension { private final Long size; private IndexedBoundDimension(String name, long size) { super(name); if (size < 1) throw new IllegalArgumentException("Size of bound dimension '" + name + "' must be at least 1"); if (size > Integer.MAX_VALUE) throw new IllegalArgumentException("Size of bound dimension '" + name + "' cannot be larger than " + Integer.MAX_VALUE); this.size = size; } @Override public Optional<Long> size() { return Optional.of(size); } @Override public Type type() { return Type.indexedBound; } @Override public IndexedBoundDimension withName(String name) { return new IndexedBoundDimension(name, size); } @Override public String toString() { return name() + "[" + size + "]"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; IndexedBoundDimension that = (IndexedBoundDimension) o; if (!size.equals(that.size)) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + size.hashCode(); return result; } } public static class IndexedUnboundDimension extends TensorType.Dimension { private IndexedUnboundDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.indexedUnbound; } @Override public IndexedUnboundDimension withName(String name) { return new IndexedUnboundDimension(name); } @Override public String toString() { return name() + "[]"; } } public static class MappedDimension extends TensorType.Dimension { private MappedDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return 
Type.mapped; } @Override public MappedDimension withName(String name) { return new MappedDimension(name); } @Override public String toString() { return name() + "{}"; } } public static class Builder { private final Map<String, Dimension> dimensions = new LinkedHashMap<>(); private final Value valueType; /** Creates an empty builder with cells of type double */ public Builder() { this(Value.DOUBLE); } public Builder(Value valueType) { this.valueType = valueType; } /** * Creates a builder containing a combination of the dimensions of the given types * * If the same dimension is indexed with different size restrictions the smallest size will be used. * If it is size restricted in one argument but not the other it will not be size restricted. * If it is indexed in one and mapped in the other it will become mapped. * * The value type will be the largest of the value types of the input types */ public Builder(TensorType ... types) { this(true, types); } public Builder(boolean allowDifferentSizes, TensorType ... 
types) { this.valueType = TensorType.combinedValueType(types); for (TensorType type : types) addDimensionsOf(type, allowDifferentSizes); } /** Creates a builder from the given dimensions, having double as the value type */ public Builder(Iterable<Dimension> dimensions) { this(Value.DOUBLE, dimensions); } /** Creates a builder from the given value type and dimensions */ public Builder(Value valueType, Iterable<Dimension> dimensions) { this.valueType = valueType; for (TensorType.Dimension dimension : dimensions) { dimension(dimension); } } private void addDimensionsOf(TensorType type, boolean allowDifferentSizes) { for (Dimension dimension : type.dimensions) { set(dimension.combineWith(Optional.ofNullable(dimensions.get(dimension.name())), allowDifferentSizes)); } } /** Returns the current number of dimensions in this */ public int rank() { return dimensions.size(); } /** * Adds a new dimension to this * * @throws IllegalArgumentException if the dimension is already present */ private Builder add(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); if (dimensions.containsKey(dimension.name())) throw new IllegalArgumentException("Could not add dimension " + dimension + " as this dimension " + "is already present"); dimensions.put(dimension.name(), dimension); return this; } /** Adds or replaces a dimension in this */ public Builder set(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); dimensions.put(dimension.name(), dimension); return this; } /** * Adds a bound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name, long size) { return add(new IndexedBoundDimension(name, size)); } /** * Adds an unbound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name) { return add(new IndexedUnboundDimension(name)); } /** * Adds a mapped 
dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder mapped(String name) { return add(new MappedDimension(name)); } /** Adds the given dimension */ public Builder dimension(Dimension dimension) { return add(dimension); } /** Returns the given dimension, or empty if none is present */ public Optional<Dimension> getDimension(String dimension) { return Optional.ofNullable(dimensions.get(dimension)); } public Builder dimension(String name, Dimension.Type type) { switch (type) { case mapped : mapped(name); break; case indexedUnbound : indexed(name); break; default : throw new IllegalArgumentException("This can not create a dimension of type " + type); } return this; } public TensorType build() { return new TensorType(valueType, dimensions.values()); } } }
class TensorType { static Ascii7BitMatcher labelMatcher = new Ascii7BitMatcher("-_@" + charsAndNumbers(), "_@$" + charsAndNumbers()); /** The permissible cell value types. Default is double. */ public enum Value { DOUBLE("double"), FLOAT("float"), INT8("int8"), BFLOAT16("bfloat16"); private final String id; Value(String id) { this.id = id; } public String id() { return id; } public boolean isEqualOrLargerThan(TensorType.Value other) { return this == other || largestOf(this, other) == this; } public static Value largestOf(List<Value> values) { if (values.isEmpty()) return Value.DOUBLE; Value largest = null; for (Value value : values) { if (largest == null) largest = value; else largest = largestOf(largest, value); } return largest; } public static Value largestOf(Value value1, Value value2) { if (value1 == DOUBLE || value2 == DOUBLE) return DOUBLE; if (value1 == FLOAT || value2 == FLOAT) return FLOAT; if (value1 == BFLOAT16 || value2 == BFLOAT16) return BFLOAT16; return INT8; } @Override public String toString() { return name().toLowerCase(); } }; /** The empty tensor type - which is the same as a double */ public static final TensorType empty = new TensorType(Value.DOUBLE, Collections.emptyList()); private final Value valueType; /** Sorted list of the dimensions of this */ private final ImmutableList<Dimension> dimensions; private final TensorType mappedSubtype; public TensorType(Value valueType, Collection<Dimension> dimensions) { this.valueType = valueType; List<Dimension> dimensionList = new ArrayList<>(dimensions); Collections.sort(dimensionList); this.dimensions = ImmutableList.copyOf(dimensionList); if (dimensionList.stream().allMatch(d -> d.isIndexed())) mappedSubtype = empty; else if (dimensionList.stream().noneMatch(d -> d.isIndexed())) mappedSubtype = this; else mappedSubtype = new TensorType(valueType, dimensions.stream().filter(d -> ! d.isIndexed()).collect(Collectors.toList())); } static public Value combinedValueType(TensorType ... 
types) { List<Value> valueTypes = new ArrayList<>(); for (TensorType type : types) { if (type.rank() > 0) { valueTypes.add(type.valueType()); } } return Value.largestOf(valueTypes); } /** * Returns a tensor type instance from a * <a href="https: * <code>tensor(dimension1, dimension2, ...)</code> * where each dimension is either * <ul> * <li><code>dimension-name[]</code> - an unbound indexed dimension * <li><code>dimension-name[int]</code> - an bound indexed dimension * <li><code>dimension-name{}</code> - a mapped dimension * </ul> * Example: <code>tensor(x[10],y[20])</code> (a matrix) */ public static TensorType fromSpec(String specString) { return TensorTypeParser.fromSpec(specString); } /** Returns the numeric type of the cell values of this */ public Value valueType() { return valueType; } /** The type representing the mapped subset of dimensions of this. */ public TensorType mappedSubtype() { return mappedSubtype; } /** Returns the number of dimensions of this: dimensions().size() */ public int rank() { return dimensions.size(); } /** Returns an immutable list of the dimensions of this */ public List<Dimension> dimensions() { return dimensions; } /** Returns an immutable set of the names of the dimensions of this */ public Set<String> dimensionNames() { return dimensions.stream().map(Dimension::name).collect(Collectors.toSet()); } /** Returns the dimension with this name, or empty if not present */ public Optional<Dimension> dimension(String name) { return indexOfDimension(name).map(i -> dimensions.get(i)); } /** Returns the 0-base index of this dimension, or empty if it is not present */ public Optional<Integer> indexOfDimension(String dimension) { for (int i = 0; i < dimensions.size(); i++) if (dimensions.get(i).name().equals(dimension)) return Optional.of(i); return Optional.empty(); } /* Returns the bound of this dimension if it is present and bound in this, empty otherwise */ public Optional<Long> sizeOfDimension(String dimension) { Optional<Dimension> d = 
dimension(dimension); if ( ! d.isPresent()) return Optional.empty(); return d.get().size(); } /** * Returns whether this type can be assigned to the given type, * i.e if the given type is a generalization of this type. */ public boolean isAssignableTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, false, true); } /** * Returns whether this type can be converted to the given type. * This is true if this type isAssignableTo the given type or * if it is not assignable only because it has a shorter dimension length * than the given type in some shared dimension(s), as it can then be * converted to the given type by zero padding. */ public boolean isConvertibleTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, true, true); } /** * Returns whether or not this type can simply be renamed to * the given type. This is the same as being assignable, but disregarding * dimension names. */ public boolean isRenamableTo(TensorType other) { return isConvertibleOrAssignableTo(other, false, false); } private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible, boolean considerName) { if ( ! generalization.valueType().isEqualOrLargerThan(this.valueType) ) return false; if (generalization.dimensions().size() != this.dimensions().size()) return false; for (int i = 0; i < generalization.dimensions().size(); i++) { Dimension thisDimension = this.dimensions().get(i); Dimension generalizationDimension = generalization.dimensions().get(i); if (thisDimension.isIndexed() != generalizationDimension.isIndexed()) return false; if (considerName && ! thisDimension.name().equals(generalizationDimension.name())) return false; if (generalizationDimension.size().isPresent()) { if ( ! 
thisDimension.size().isPresent()) return false; if (convertible) { if (thisDimension.size().get() > generalizationDimension.size().get()) return false; } else { if (!thisDimension.size().get().equals(generalizationDimension.size().get())) return false; } } } return true; } @Override public String toString() { return "tensor" + (valueType == Value.DOUBLE ? "" : "<" + valueType.id() + ">") + "(" + dimensions.stream().map(Dimension::toString).collect(Collectors.joining(",")) + ")"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TensorType other = (TensorType)o; if ( (this.rank() == 0) && (other.rank() == 0)) return true; if ( this.valueType != other.valueType) return false; if ( ! this.dimensions.equals(other.dimensions)) return false; return true; } /** Returns whether the given type has the same dimension names as this */ public boolean mathematicallyEquals(TensorType other) { if (dimensions().size() != other.dimensions().size()) return false; for (int i = 0; i < dimensions().size(); i++) if (!dimensions().get(i).name().equals(other.dimensions().get(i).name())) return false; return true; } /** * Returns the dimensionwise generalization of this and the given type, or empty if no generalization exists. * A dimensionwise generalization exists if the two tensors share the same dimensions, and each dimension * is compatible. * For example, the dimensionwise generalization of tensor(x[],y[5]) and tensor(x[5],y[]) is tensor(x[],y[]) */ public Optional<TensorType> dimensionwiseGeneralizationWith(TensorType other) { if (this.equals(other)) return Optional.of(this); if (this.dimensions.size() != other.dimensions.size()) return Optional.empty(); Builder b = new Builder(TensorType.Value.largestOf(valueType, other.valueType)); for (int i = 0; i < dimensions.size(); i++) { Dimension thisDim = this.dimensions().get(i); Dimension otherDim = other.dimensions().get(i); if ( ! 
thisDim.name().equals(otherDim.name())) return Optional.empty(); if (thisDim.isIndexed() && otherDim.isIndexed()) { if (thisDim.size().isPresent() && otherDim.size().isPresent()) { if ( ! thisDim.size().get().equals(otherDim.size().get())) return Optional.empty(); b.dimension(thisDim); } else if (thisDim.size().isPresent()) { b.dimension(otherDim); } else if (otherDim.size().isPresent()) { b.dimension(thisDim); } else { b.dimension(thisDim); } } else if ( ! thisDim.isIndexed() && ! otherDim.isIndexed()) { b.dimension(thisDim); } else { return Optional.empty(); } } return Optional.of(b.build()); } @Override public int hashCode() { return Objects.hash(dimensions, valueType); } /** * A tensor dimension. * Dimensions have the natural order of their names. */ public static abstract class Dimension implements Comparable<Dimension> { public enum Type { indexedBound, indexedUnbound, mapped } private final String name; private Dimension(String name) { this.name = requireIdentifier(name); } public final String name() { return name; } /** Returns the size of this dimension if it is bound, empty otherwise */ public abstract Optional<Long> size(); public abstract Type type(); /** Returns a copy of this with the name set to the given name */ public abstract Dimension withName(String name); /** Returns true if this is an indexed bound or unbound type */ public boolean isIndexed() { return type() == Type.indexedBound || type() == Type.indexedUnbound; } /** Returns true if this is of the mapped type */ public boolean isMapped() { return type() == Type.mapped; } /** * Returns the dimension resulting from combining two dimensions having the same name but possibly different * types: * * [N] + [M] = [ min(N, M) ] * [N] + [] = [] * [] + {} = {} */ Dimension combineWith(Optional<Dimension> other, boolean allowDifferentSizes) { if ( ! 
other.isPresent()) return this; if (this instanceof MappedDimension) return this; if (other.get() instanceof MappedDimension) return other.get(); if (this instanceof IndexedUnboundDimension) return this; if (other.get() instanceof IndexedUnboundDimension) return other.get(); IndexedBoundDimension thisIb = (IndexedBoundDimension)this; IndexedBoundDimension otherIb = (IndexedBoundDimension)other.get(); if (allowDifferentSizes) return thisIb.size().get() < otherIb.size().get() ? thisIb : otherIb; if ( ! thisIb.size().equals(otherIb.size())) throw new IllegalArgumentException("Unequal dimension sizes in " + thisIb + " and " + otherIb); return thisIb; } @Override public abstract String toString(); @Override public boolean equals(Object other) { if (this == other) return true; if (other == null || getClass() != other.getClass()) return false; return name.equals(((Dimension)other).name); } @Override public int hashCode() { return name.hashCode(); } @Override public int compareTo(Dimension other) { return this.name.compareTo(other.name); } public static Dimension indexed(String name, long size) { return new IndexedBoundDimension(name, size); } public static Dimension indexed(String name) { return new IndexedUnboundDimension(name); } public static Dimension mapped(String name) { return new MappedDimension(name); } static private String requireIdentifier(String name) { if (name == null) throw new IllegalArgumentException("A dimension name cannot be null"); if ( ! 
TensorType.labelMatcher.matches(name)) throw new IllegalArgumentException("A dimension name must be an identifier or integer, not '" + name + "'"); return name; } } public static class IndexedBoundDimension extends TensorType.Dimension { private final Long size; private IndexedBoundDimension(String name, long size) { super(name); if (size < 1) throw new IllegalArgumentException("Size of bound dimension '" + name + "' must be at least 1"); if (size > Integer.MAX_VALUE) throw new IllegalArgumentException("Size of bound dimension '" + name + "' cannot be larger than " + Integer.MAX_VALUE); this.size = size; } @Override public Optional<Long> size() { return Optional.of(size); } @Override public Type type() { return Type.indexedBound; } @Override public IndexedBoundDimension withName(String name) { return new IndexedBoundDimension(name, size); } @Override public String toString() { return name() + "[" + size + "]"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; IndexedBoundDimension that = (IndexedBoundDimension) o; if (!size.equals(that.size)) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + size.hashCode(); return result; } } public static class IndexedUnboundDimension extends TensorType.Dimension { private IndexedUnboundDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.indexedUnbound; } @Override public IndexedUnboundDimension withName(String name) { return new IndexedUnboundDimension(name); } @Override public String toString() { return name() + "[]"; } } public static class MappedDimension extends TensorType.Dimension { private MappedDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return 
Type.mapped; } @Override public MappedDimension withName(String name) { return new MappedDimension(name); } @Override public String toString() { return name() + "{}"; } } public static class Builder { private final Map<String, Dimension> dimensions = new LinkedHashMap<>(); private final Value valueType; /** Creates an empty builder with cells of type double */ public Builder() { this(Value.DOUBLE); } public Builder(Value valueType) { this.valueType = valueType; } /** * Creates a builder containing a combination of the dimensions of the given types * * If the same dimension is indexed with different size restrictions the smallest size will be used. * If it is size restricted in one argument but not the other it will not be size restricted. * If it is indexed in one and mapped in the other it will become mapped. * * The value type will be the largest of the value types of the input types */ public Builder(TensorType ... types) { this(true, types); } public Builder(boolean allowDifferentSizes, TensorType ... 
types) { this.valueType = TensorType.combinedValueType(types); for (TensorType type : types) addDimensionsOf(type, allowDifferentSizes); } /** Creates a builder from the given dimensions, having double as the value type */ public Builder(Iterable<Dimension> dimensions) { this(Value.DOUBLE, dimensions); } /** Creates a builder from the given value type and dimensions */ public Builder(Value valueType, Iterable<Dimension> dimensions) { this.valueType = valueType; for (TensorType.Dimension dimension : dimensions) { dimension(dimension); } } private void addDimensionsOf(TensorType type, boolean allowDifferentSizes) { for (Dimension dimension : type.dimensions) { set(dimension.combineWith(Optional.ofNullable(dimensions.get(dimension.name())), allowDifferentSizes)); } } /** Returns the current number of dimensions in this */ public int rank() { return dimensions.size(); } /** * Adds a new dimension to this * * @throws IllegalArgumentException if the dimension is already present */ private Builder add(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); if (dimensions.containsKey(dimension.name())) throw new IllegalArgumentException("Could not add dimension " + dimension + " as this dimension " + "is already present"); dimensions.put(dimension.name(), dimension); return this; } /** Adds or replaces a dimension in this */ public Builder set(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); dimensions.put(dimension.name(), dimension); return this; } /** * Adds a bound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name, long size) { return add(new IndexedBoundDimension(name, size)); } /** * Adds an unbound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name) { return add(new IndexedUnboundDimension(name)); } /** * Adds a mapped 
dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder mapped(String name) { return add(new MappedDimension(name)); } /** Adds the given dimension */ public Builder dimension(Dimension dimension) { return add(dimension); } /** Returns the given dimension, or empty if none is present */ public Optional<Dimension> getDimension(String dimension) { return Optional.ofNullable(dimensions.get(dimension)); } public Builder dimension(String name, Dimension.Type type) { switch (type) { case mapped : mapped(name); break; case indexedUnbound : indexed(name); break; default : throw new IllegalArgumentException("This can not create a dimension of type " + type); } return this; } public TensorType build() { return new TensorType(valueType, dimensions.values()); } } }
consider adding some utility methods (in TypedBinaryFormat maybe) for the low-level bit shuffling: ``` static short bitsFromBFloat16(float bf16) { int bits = Float.floatToRawIntBits(bf16); return (short) (bits >>> 16); } static float bFloat16FromBits(short bits) { return Float.intBitsToFloat(bits << 16); } ```
private void encodeBFloat16Cells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putShort((short)(Float.floatToRawIntBits(tensor.getFloat(i)) >>> 16)); }
buffer.putShort((short)(Float.floatToRawIntBits(tensor.getFloat(i)) >>> 16));
private void encodeBFloat16Cells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putShort(TypedBinaryFormat.bFloat16BitsFromFloat(tensor.getFloat(i))); }
class DenseBinaryFormat implements BinaryFormat { private final TensorType.Value serializationValueType; DenseBinaryFormat() { this(TensorType.Value.DOUBLE); } DenseBinaryFormat(TensorType.Value serializationValueType) { this.serializationValueType = serializationValueType; } @Override public void encode(GrowableByteBuffer buffer, Tensor tensor) { if ( ! ( tensor instanceof IndexedTensor)) throw new RuntimeException("The dense format is only supported for indexed tensors"); encodeDimensions(buffer, (IndexedTensor)tensor); encodeCells(buffer, (IndexedTensor)tensor); } private void encodeDimensions(GrowableByteBuffer buffer, IndexedTensor tensor) { buffer.putInt1_4Bytes(tensor.type().dimensions().size()); for (int i = 0; i < tensor.type().dimensions().size(); i++) { buffer.putUtf8String(tensor.type().dimensions().get(i).name()); buffer.putInt1_4Bytes((int)tensor.dimensionSizes().size(i)); } } private void encodeCells(GrowableByteBuffer buffer, IndexedTensor tensor) { switch (serializationValueType) { case DOUBLE: encodeDoubleCells(tensor, buffer); break; case FLOAT: encodeFloatCells(tensor, buffer); break; case BFLOAT16: encodeBFloat16Cells(tensor, buffer); break; case INT8: encodeInt8Cells(tensor, buffer); break; } } private void encodeDoubleCells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putDouble(tensor.get(i)); } private void encodeFloatCells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putFloat(tensor.getFloat(i)); } private void encodeInt8Cells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.put((byte) tensor.getFloat(i)); } @Override public Tensor decode(Optional<TensorType> optionalType, GrowableByteBuffer buffer) { TensorType type; DimensionSizes sizes; if (optionalType.isPresent()) { type = optionalType.get(); if (type.valueType() != this.serializationValueType) { throw new 
IllegalArgumentException("Tensor value type mismatch. Value type " + type.valueType() + " is not " + this.serializationValueType); } TensorType serializedType = decodeType(buffer); if ( ! serializedType.isAssignableTo(type)) throw new IllegalArgumentException("Type/instance mismatch: A tensor of type " + serializedType + " cannot be assigned to type " + type); sizes = sizesFromType(serializedType); } else { type = decodeType(buffer); sizes = sizesFromType(type); } Tensor.Builder builder = Tensor.Builder.of(type, sizes); decodeCells(sizes, buffer, (IndexedTensor.BoundBuilder)builder); return builder.build(); } private TensorType decodeType(GrowableByteBuffer buffer) { TensorType.Builder builder = new TensorType.Builder(serializationValueType); int dimensionCount = buffer.getInt1_4Bytes(); for (int i = 0; i < dimensionCount; i++) builder.indexed(buffer.getUtf8String(), buffer.getInt1_4Bytes()); return builder.build(); } /** Returns dimension sizes from a type consisting of fully specified, indexed dimensions only */ private DimensionSizes sizesFromType(TensorType type) { DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size()); for (int i = 0; i < type.dimensions().size(); i++) builder.set(i, type.dimensions().get(i).size().get()); return builder.build(); } private void decodeCells(DimensionSizes sizes, GrowableByteBuffer buffer, IndexedTensor.BoundBuilder builder) { switch (serializationValueType) { case DOUBLE: decodeDoubleCells(sizes, builder, buffer); break; case FLOAT: decodeFloatCells(sizes, builder, buffer); break; case BFLOAT16: decodeBFloat16Cells(sizes, builder, buffer); break; case INT8: decodeInt8Cells(sizes, builder, buffer); break; } } private void decodeDoubleCells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) builder.cellByDirectIndex(i, buffer.getDouble()); } private void decodeFloatCells(DimensionSizes sizes, IndexedTensor.BoundBuilder 
builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) builder.cellByDirectIndex(i, buffer.getFloat()); } private void decodeBFloat16Cells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) { builder.cellByDirectIndex(i, Float.intBitsToFloat(buffer.getShort() << 16)); } } private void decodeInt8Cells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) { builder.cellByDirectIndex(i, (float) buffer.get()); } } }
class DenseBinaryFormat implements BinaryFormat { private final TensorType.Value serializationValueType; DenseBinaryFormat() { this(TensorType.Value.DOUBLE); } DenseBinaryFormat(TensorType.Value serializationValueType) { this.serializationValueType = serializationValueType; } @Override public void encode(GrowableByteBuffer buffer, Tensor tensor) { if ( ! ( tensor instanceof IndexedTensor)) throw new RuntimeException("The dense format is only supported for indexed tensors"); encodeDimensions(buffer, (IndexedTensor)tensor); encodeCells(buffer, (IndexedTensor)tensor); } private void encodeDimensions(GrowableByteBuffer buffer, IndexedTensor tensor) { buffer.putInt1_4Bytes(tensor.type().dimensions().size()); for (int i = 0; i < tensor.type().dimensions().size(); i++) { buffer.putUtf8String(tensor.type().dimensions().get(i).name()); buffer.putInt1_4Bytes((int)tensor.dimensionSizes().size(i)); } } private void encodeCells(GrowableByteBuffer buffer, IndexedTensor tensor) { switch (serializationValueType) { case DOUBLE: encodeDoubleCells(tensor, buffer); break; case FLOAT: encodeFloatCells(tensor, buffer); break; case BFLOAT16: encodeBFloat16Cells(tensor, buffer); break; case INT8: encodeInt8Cells(tensor, buffer); break; } } private void encodeDoubleCells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putDouble(tensor.get(i)); } private void encodeFloatCells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putFloat(tensor.getFloat(i)); } private void encodeInt8Cells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.put((byte) tensor.getFloat(i)); } @Override public Tensor decode(Optional<TensorType> optionalType, GrowableByteBuffer buffer) { TensorType type; DimensionSizes sizes; if (optionalType.isPresent()) { type = optionalType.get(); if (type.valueType() != this.serializationValueType) { throw new 
IllegalArgumentException("Tensor value type mismatch. Value type " + type.valueType() + " is not " + this.serializationValueType); } TensorType serializedType = decodeType(buffer); if ( ! serializedType.isAssignableTo(type)) throw new IllegalArgumentException("Type/instance mismatch: A tensor of type " + serializedType + " cannot be assigned to type " + type); sizes = sizesFromType(serializedType); } else { type = decodeType(buffer); sizes = sizesFromType(type); } Tensor.Builder builder = Tensor.Builder.of(type, sizes); decodeCells(sizes, buffer, (IndexedTensor.BoundBuilder)builder); return builder.build(); } private TensorType decodeType(GrowableByteBuffer buffer) { TensorType.Builder builder = new TensorType.Builder(serializationValueType); int dimensionCount = buffer.getInt1_4Bytes(); for (int i = 0; i < dimensionCount; i++) builder.indexed(buffer.getUtf8String(), buffer.getInt1_4Bytes()); return builder.build(); } /** Returns dimension sizes from a type consisting of fully specified, indexed dimensions only */ private DimensionSizes sizesFromType(TensorType type) { DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size()); for (int i = 0; i < type.dimensions().size(); i++) builder.set(i, type.dimensions().get(i).size().get()); return builder.build(); } private void decodeCells(DimensionSizes sizes, GrowableByteBuffer buffer, IndexedTensor.BoundBuilder builder) { switch (serializationValueType) { case DOUBLE: decodeDoubleCells(sizes, builder, buffer); break; case FLOAT: decodeFloatCells(sizes, builder, buffer); break; case BFLOAT16: decodeBFloat16Cells(sizes, builder, buffer); break; case INT8: decodeInt8Cells(sizes, builder, buffer); break; } } private void decodeDoubleCells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) builder.cellByDirectIndex(i, buffer.getDouble()); } private void decodeFloatCells(DimensionSizes sizes, IndexedTensor.BoundBuilder 
builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) builder.cellByDirectIndex(i, buffer.getFloat()); } private void decodeBFloat16Cells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) { builder.cellByDirectIndex(i, TypedBinaryFormat.floatFromBFloat16Bits(buffer.getShort())); } } private void decodeInt8Cells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) { builder.cellByDirectIndex(i, (float) buffer.get()); } } }
Thanks, fixed.
static void indexedBlockToString(IndexedTensor tensor, Indexes indexes, StringBuilder b) { for (int index = 0; index < tensor.size(); index++) { indexes.next(); for (int i = 0; i < indexes.nextDimensionsAtStart(); i++) b.append("["); if (tensor.type().valueType() == TensorType.Value.DOUBLE) b.append(tensor.get(index)); else if (tensor.type().valueType() == TensorType.Value.FLOAT) b.append(tensor.getFloat(index)); else if (tensor.type().valueType() == TensorType.Value.BFLOAT16) b.append(tensor.getFloat(index)); else if (tensor.type().valueType() == TensorType.Value.INT8) b.append(tensor.getFloat(index)); else throw new IllegalStateException("Unexpected value type " + tensor.type().valueType()); for (int i = 0; i < indexes.nextDimensionsAtEnd(); i++) b.append("]"); if (index < tensor.size() - 1) b.append(", "); } }
b.append(tensor.getFloat(index));
static void indexedBlockToString(IndexedTensor tensor, Indexes indexes, StringBuilder b) { for (int index = 0; index < tensor.size(); index++) { indexes.next(); for (int i = 0; i < indexes.nextDimensionsAtStart(); i++) b.append("["); switch (tensor.type().valueType()) { case DOUBLE: b.append(tensor.get(index)); break; case FLOAT: b.append(tensor.getFloat(index)); break; case BFLOAT16: b.append(tensor.getFloat(index)); break; case INT8: b.append(tensor.getFloat(index)); break; default: throw new IllegalStateException("Unexpected value type " + tensor.type().valueType()); } for (int i = 0; i < indexes.nextDimensionsAtEnd(); i++) b.append("]"); if (index < tensor.size() - 1) b.append(", "); } }
class IndexedTensor implements Tensor { /** The prescribed and possibly abstract type this is an instance of */ private final TensorType type; /** The sizes of the dimensions of this in the order of the dimensions of the type */ private final DimensionSizes dimensionSizes; IndexedTensor(TensorType type, DimensionSizes dimensionSizes) { this.type = type; this.dimensionSizes = dimensionSizes; } /** * Returns an iterator over the cells of this in the <i>standard value order</i>. */ @Override public Iterator<Cell> cellIterator() { return new CellIterator(); } /** Returns an iterator over all the cells in this tensor which matches the given partial address */ public SubspaceIterator cellIterator(PartialAddress partialAddress, DimensionSizes iterationSizes) { long[] startAddress = new long[type().dimensions().size()]; List<Integer> iterateDimensions = new ArrayList<>(); for (int i = 0; i < type().dimensions().size(); i++) { long partialAddressLabel = partialAddress.numericLabel(type.dimensions().get(i).name()); if (partialAddressLabel >= 0) startAddress[i] = partialAddressLabel; else iterateDimensions.add(i); } return new SubspaceIterator(iterateDimensions, startAddress, iterationSizes); } /** Returns an iterator over the values of this returned in the <i>standard value order</i> */ @Override public Iterator<Double> valueIterator() { return new ValueIterator(); } /** * Returns an iterator over value iterators where the outer iterator is over each unique value of the dimensions * given and the inner iterator is over each unique value of the rest of the dimensions, in the * <i>standard value order</i> * * @param dimensions the names of the dimensions of the superspace * @param sizes the size of each dimension in the space we are returning values for, containing * one value per dimension of this tensor (in order). 
Each size may be the same or smaller * than the corresponding size of this tensor */ public Iterator<SubspaceIterator> subspaceIterator(Set<String> dimensions, DimensionSizes sizes) { return new SuperspaceIterator(dimensions, sizes); } /** Returns a subspace iterator having the sizes of the dimensions of this tensor */ public Iterator<SubspaceIterator> subspaceIterator(Set<String> dimensions) { return subspaceIterator(dimensions, dimensionSizes); } /** * Returns the value at the given indexes as a double * * @param indexes the indexes into the dimensions of this. Must be one number per dimension of this * @throws IllegalArgumentException if any of the indexes are out of bound or a wrong number of indexes are given */ public double get(long ... indexes) { return get((int)toValueIndex(indexes, dimensionSizes)); } /** * Returns the value at the given indexes as a float * * @param indexes the indexes into the dimensions of this. Must be one number per dimension of this * @throws IllegalArgumentException if any of the indexes are out of bound or a wrong number of indexes are given */ public float getFloat(long ... indexes) { return getFloat((int)toValueIndex(indexes, dimensionSizes)); } /** Returns the value at this address, or NaN if there is no value at this address */ @Override public double get(TensorAddress address) { try { return get((int)toValueIndex(address, dimensionSizes, type)); } catch (IllegalArgumentException e) { return Double.NaN; } } /** * Returns the value at the given <i>standard value order</i> index as a double. * * @param valueIndex the direct index into the underlying data. * @throws IllegalArgumentException if index is out of bounds */ public abstract double get(long valueIndex); /** * Returns the value at the given <i>standard value order</i> index as a float. * * @param valueIndex the direct index into the underlying data. 
* @throws IllegalArgumentException if index is out of bounds */ public abstract float getFloat(long valueIndex); static long toValueIndex(long[] indexes, DimensionSizes sizes) { if (indexes.length == 1) return indexes[0]; if (indexes.length == 0) return 0; long valueIndex = 0; for (int i = 0; i < indexes.length; i++) { if (indexes[i] >= sizes.size(i)) throw new IllegalArgumentException(Arrays.toString(indexes) + " are not within bounds"); valueIndex += productOfDimensionsAfter(i, sizes) * indexes[i]; } return valueIndex; } static long toValueIndex(TensorAddress address, DimensionSizes sizes, TensorType type) { if (address.isEmpty()) return 0; long valueIndex = 0; for (int i = 0; i < address.size(); i++) { if (address.numericLabel(i) >= sizes.size(i)) throw new IllegalArgumentException(address + " is not within the bounds of " + type); valueIndex += productOfDimensionsAfter(i, sizes) * address.numericLabel(i); } return valueIndex; } private static long productOfDimensionsAfter(int afterIndex, DimensionSizes sizes) { long product = 1; for (int i = afterIndex + 1; i < sizes.dimensions(); i++) product *= sizes.size(i); return product; } void throwOnIncompatibleType(TensorType type) { if ( ! 
this.type().isRenamableTo(type)) throw new IllegalArgumentException("Can not change type from " + this.type() + " to " + type + ": Types are not compatible"); } @Override public TensorType type() { return type; } @Override public abstract IndexedTensor withType(TensorType type); public DimensionSizes dimensionSizes() { return dimensionSizes; } @Override public Map<TensorAddress, Double> cells() { if (dimensionSizes.dimensions() == 0) return Collections.singletonMap(TensorAddress.of(), get(0)); ImmutableMap.Builder<TensorAddress, Double> builder = new ImmutableMap.Builder<>(); Indexes indexes = Indexes.of(dimensionSizes, dimensionSizes, size()); for (long i = 0; i < size(); i++) { indexes.next(); builder.put(indexes.toAddress(), get(i)); } return builder.build(); } @Override public Tensor remove(Set<TensorAddress> addresses) { throw new IllegalArgumentException("Remove is not supported for indexed tensors"); } @Override public String toString() { if (type.rank() == 0) return Tensor.toStandardString(this); if (type.dimensions().stream().anyMatch(d -> d.size().isEmpty())) return Tensor.toStandardString(this); Indexes indexes = Indexes.of(dimensionSizes); StringBuilder b = new StringBuilder(type.toString()).append(":"); indexedBlockToString(this, indexes, b); return b.toString(); } @Override public boolean equals(Object other) { if ( ! ( other instanceof Tensor)) return false; return Tensor.equals(this, ((Tensor)other)); } public abstract static class Builder implements Tensor.Builder { final TensorType type; private Builder(TensorType type) { this.type = type; } public static Builder of(TensorType type) { if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension)) return of(type, BoundBuilder.dimensionSizesOf(type)); else return new UnboundBuilder(type); } /** * Creates a builder initialized with the given values * * @param type the type of the tensor to build * @param values the initial values of the tensor. 
This <b>transfers ownership</b> of the value array - it * must not be further mutated by the caller */ public static Builder of(TensorType type, float[] values) { if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension)) return of(type, BoundBuilder.dimensionSizesOf(type), values); else return new UnboundBuilder(type); } /** * Creates a builder initialized with the given values * * @param type the type of the tensor to build * @param values the initial values of the tensor. This <b>transfers ownership</b> of the value array - it * must not be further mutated by the caller */ public static Builder of(TensorType type, double[] values) { if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension)) return of(type, BoundBuilder.dimensionSizesOf(type), values); else return new UnboundBuilder(type); } /** * Create a builder with dimension size information for this instance. Must be one size entry per dimension, * and, agree with the type size information when specified in the type. * If sizes are completely specified in the type this size information is redundant. */ public static Builder of(TensorType type, DimensionSizes sizes) { validate(type, sizes); if (type.valueType() == TensorType.Value.FLOAT) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes); else if (type.valueType() == TensorType.Value.BFLOAT16) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes); else if (type.valueType() == TensorType.Value.INT8) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes); else if (type.valueType() == TensorType.Value.DOUBLE) return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes); else return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes); } /** * Creates a builder initialized with the given values * * @param type the type of the tensor to build * @param values the initial values of the tensor in the <i>standard value order</i>. 
* This <b>transfers ownership</b> of the value array - it * must not be further mutated by the caller */ public static Builder of(TensorType type, DimensionSizes sizes, float[] values) { validate(type, sizes); validateSizes(sizes, values.length); if (type.valueType() == TensorType.Value.FLOAT) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes, values); else if (type.valueType() == TensorType.Value.BFLOAT16) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.INT8) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.DOUBLE) return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes).fill(values); else return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes).fill(values); } /** * Creates a builder initialized with the given values * * @param type the type of the tensor to build * @param values the initial values of the tensor in the <i>standard value order</i>. 
* This <b>transfers ownership</b> of the value array - it * must not be further mutated by the caller */ public static Builder of(TensorType type, DimensionSizes sizes, double[] values) { validate(type, sizes); validateSizes(sizes, values.length); if (type.valueType() == TensorType.Value.FLOAT) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.BFLOAT16) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.INT8) return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); else if (type.valueType() == TensorType.Value.DOUBLE) return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes, values); else return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes, values); } private static void validateSizes(DimensionSizes sizes, int length) { if (sizes.totalSize() != length) { throw new IllegalArgumentException("Invalid size(" + length + ") of supplied value vector." + " Type specifies that size should be " + sizes.totalSize()); } } private static void validate(TensorType type, DimensionSizes sizes) { if (sizes.dimensions() != type.dimensions().size()) throw new IllegalArgumentException(sizes.dimensions() + " is the wrong number of dimensions for " + type); for (int i = 0; i < sizes.dimensions(); i++ ) { Optional<Long> size = type.dimensions().get(i).size(); if (size.isPresent() && size.get() < sizes.size(i)) throw new IllegalArgumentException("Size of dimension " + type.dimensions().get(i).name() + " is " + sizes.size(i) + " but cannot be larger than " + size.get() + " in " + type); } } public abstract Builder cell(double value, long ... indexes); public abstract Builder cell(float value, long ... 
indexes); @Override public TensorType type() { return type; } @Override public abstract IndexedTensor build(); } public interface DirectIndexBuilder { TensorType type(); /** Sets a value by its <i>standard value order</i> index */ void cellByDirectIndex(long index, double value); /** Sets a value by its <i>standard value order</i> index */ void cellByDirectIndex(long index, float value); } /** A bound builder can create the double array directly */ public static abstract class BoundBuilder extends Builder implements DirectIndexBuilder { private DimensionSizes sizes; private static DimensionSizes dimensionSizesOf(TensorType type) { DimensionSizes.Builder b = new DimensionSizes.Builder(type.dimensions().size()); for (int i = 0; i < type.dimensions().size(); i++) b.set(i, type.dimensions().get(i).size().get()); return b.build(); } BoundBuilder(TensorType type, DimensionSizes sizes) { super(type); if ( sizes.dimensions() != type.dimensions().size()) throw new IllegalArgumentException("Must have a dimension size entry for each dimension in " + type); this.sizes = sizes; } BoundBuilder fill(float[] values) { long index = 0; for (float value : values) { cellByDirectIndex(index++, value); } return this; } BoundBuilder fill(double[] values) { long index = 0; for (double value : values) { cellByDirectIndex(index++, value); } return this; } DimensionSizes sizes() { return sizes; } } /** * A builder used when we don't know the size of the dimensions up front. * All values is all dimensions must be specified. 
*/ private static class UnboundBuilder extends Builder { /** List of List or Double */ private List<Object> firstDimension = null; private UnboundBuilder(TensorType type) { super(type); } @Override public IndexedTensor build() { if (firstDimension == null) throw new IllegalArgumentException("Tensor of type " + type() + " has no values"); if (type.dimensions().isEmpty()) return new IndexedDoubleTensor(type, new DimensionSizes.Builder(type.dimensions().size()).build(), new double[] {(Double) firstDimension.get(0) }); DimensionSizes dimensionSizes = findDimensionSizes(firstDimension); double[] values = new double[(int)dimensionSizes.totalSize()]; fillValues(0, 0, firstDimension, dimensionSizes, values); return new IndexedDoubleTensor(type, dimensionSizes, values); } private DimensionSizes findDimensionSizes(List<Object> firstDimension) { List<Long> dimensionSizeList = new ArrayList<>(type.dimensions().size()); findDimensionSizes(0, dimensionSizeList, firstDimension); DimensionSizes.Builder b = new DimensionSizes.Builder(type.dimensions().size()); for (int i = 0; i < b.dimensions(); i++) { if (i < dimensionSizeList.size()) b.set(i, dimensionSizeList.get(i)); } return b.build(); } @SuppressWarnings("unchecked") private void findDimensionSizes(int currentDimensionIndex, List<Long> dimensionSizes, List<Object> currentDimension) { if (currentDimensionIndex == dimensionSizes.size()) dimensionSizes.add((long)currentDimension.size()); else if (dimensionSizes.get(currentDimensionIndex) != currentDimension.size()) throw new IllegalArgumentException("Missing values in dimension " + type.dimensions().get(currentDimensionIndex) + " in " + type); for (Object value : currentDimension) if (value instanceof List) findDimensionSizes(currentDimensionIndex + 1, dimensionSizes, (List<Object>)value); } @SuppressWarnings("unchecked") private void fillValues(int currentDimensionIndex, long offset, List<Object> currentDimension, DimensionSizes sizes, double[] values) { if 
(currentDimensionIndex < sizes.dimensions() - 1) { for (long i = 0; i < currentDimension.size(); i++) fillValues(currentDimensionIndex + 1, offset + productOfDimensionsAfter(currentDimensionIndex, sizes) * i, (List<Object>) currentDimension.get((int)i), sizes, values); } else { for (long i = 0; i < currentDimension.size(); i++) { values[(int)(offset + i)] = nullAsZero((Double)currentDimension.get((int)i)); } } } private double nullAsZero(Double value) { if (value == null) return 0; return value; } @Override public CellBuilder cell() { return new CellBuilder(type, this); } @Override public Builder cell(TensorAddress address, float value) { return cell(address, (double)value); } @Override public Builder cell(TensorAddress address, double value) { long[] indexes = new long[address.size()]; for (int i = 0; i < address.size(); i++) { indexes[i] = address.numericLabel(i); } cell(value, indexes); return this; } @Override public Builder cell(float value, long... indexes) { return cell((double)value, indexes); } /** * Set a value using an index API. The number of indexes must be the same as the dimensions in the type of this. * Values can be written in any order but all values needed to make this dense must be provided * before building this. * * @return this for chaining */ @SuppressWarnings("unchecked") @Override public Builder cell(double value, long... 
indexes) { if (indexes.length != type.dimensions().size()) throw new IllegalArgumentException("Wrong number of indexes (" + indexes.length + ") for " + type); if (indexes.length == 0) { firstDimension = Collections.singletonList(value); return this; } if (firstDimension == null) firstDimension = new ArrayList<>(); List<Object> currentValues = firstDimension; for (int dimensionIndex = 0; dimensionIndex < indexes.length; dimensionIndex++) { ensureCapacity(indexes[dimensionIndex], currentValues); if (dimensionIndex == indexes.length - 1) { currentValues.set((int)indexes[dimensionIndex], value); } else { if (currentValues.get((int)indexes[dimensionIndex]) == null) currentValues.set((int)indexes[dimensionIndex], new ArrayList<>()); currentValues = (List<Object>) currentValues.get((int)indexes[dimensionIndex]); } } return this; } /** Fill the given list with nulls if necessary to make sure it has a (possibly null) value at the given index */ private void ensureCapacity(long index, List<Object> list) { while (list.size() <= index) list.add(list.size(), null); } } private final class CellIterator implements Iterator<Cell> { private long count = 0; private final Indexes indexes = Indexes.of(dimensionSizes, dimensionSizes, size()); private final LazyCell reusedCell = new LazyCell(indexes, Double.NaN); @Override public boolean hasNext() { return count < indexes.size(); } @Override public Cell next() { if ( ! 
hasNext()) throw new NoSuchElementException("No cell at " + indexes);
            count++;
            indexes.next();
            // Mutates and returns the same LazyCell instance: the returned Cell is only valid until next() is called again
            reusedCell.value = get(indexes.toSourceValueIndex());
            return reusedCell;
        }

    }

    /** Iterates over all values of this tensor by direct value index, in the standard value order. */
    private final class ValueIterator implements Iterator<Double> {

        private long count = 0;

        @Override
        public boolean hasNext() { return count < size(); }

        @Override
        public Double next() {
            try {
                return get(count++);
            }
            catch (IllegalArgumentException e) {
                throw new NoSuchElementException("No element at position " + count);
            }
        }

    }

    /** Iterates the positions of the given superdimensions, yielding a SubspaceIterator over the rest for each. */
    private final class SuperspaceIterator implements Iterator<SubspaceIterator> {

        private final Indexes superindexes;

        /** The indexes this should iterate over */
        private final List<Integer> subdimensionIndexes;

        /**
         * The sizes of the space we'll return values of, one value for each dimension of this tensor,
         * which may be equal to or smaller than the sizes of this tensor
         */
        private final DimensionSizes iterateSizes;

        private long count = 0;

        private SuperspaceIterator(Set<String> superdimensionNames, DimensionSizes iterateSizes) {
            this.iterateSizes = iterateSizes;

            List<Integer> superdimensionIndexes = new ArrayList<>(superdimensionNames.size());
            subdimensionIndexes = new ArrayList<>(superdimensionNames.size());
            // Collected in reverse type order (see completeIterationOrder: the rightmost dimension binds closest)
            for (int i = type.dimensions().size() - 1; i >= 0; i-- ) {
                if (superdimensionNames.contains(type.dimensions().get(i).name()))
                    superdimensionIndexes.add(i);
                else
                    subdimensionIndexes.add(i);
            }

            superindexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, superdimensionIndexes);
        }

        @Override
        public boolean hasNext() { return count < superindexes.size(); }

        @Override
        public SubspaceIterator next() {
            if ( ! hasNext()) throw new NoSuchElementException("No cell at " + superindexes);
            count++;
            superindexes.next();
            // Each subspace starts at a copy of the current superspace position
            return new SubspaceIterator(subdimensionIndexes, superindexes.indexesCopy(), iterateSizes);
        }

    }

    /**
     * An iterator over a subspace of this tensor. This is exposed to allow clients to query the size.
     * NOTE THAT the Cell returned by next is only valid until the next() call is made.
     * This is a concession to performance due to this typically being used in inner loops.
     */
    public final class SubspaceIterator implements Iterator<Tensor.Cell> {

        /**
         * This iterator will iterate over the given dimensions, in the order given
         * (the first dimension index given is incremented to exhaustion first (i.e is etc.).
         * This may be any subset of the dimensions given by address and dimensionSizes.
         */
        private final List<Integer> iterateDimensions;
        private final long[] address;
        private final DimensionSizes iterateSizes;

        private Indexes indexes;
        private long count = 0;

        /** A lazy cell for reuse */
        private final LazyCell reusedCell;

        /**
         * Creates a new subspace iterator
         *
         * @param iterateDimensions the dimensions to iterate over, given as indexes in the dimension order of the
         *                          type of the tensor this iterates over. This iterator will iterate over these
         *                          dimensions to exhaustion in the order given (the first dimension index given is
         *                          incremented to exhaustion first etc., while other dimensions will be held
         *                          at a constant position.
         *                          This may be any subset of the dimensions given by address and dimensionSizes.
         *                          This is treated as immutable.
         * @param address the address of the first cell of this subspace.
         */
        private SubspaceIterator(List<Integer> iterateDimensions, long[] address, DimensionSizes iterateSizes) {
            this.iterateDimensions = iterateDimensions;
            this.address = address;
            this.iterateSizes = iterateSizes;
            this.indexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, iterateDimensions, address);
            reusedCell = new LazyCell(indexes, Double.NaN);
        }

        /** Returns the total number of cells in this subspace */
        public long size() {
            return indexes.size();
        }

        /** Returns the address of the cell this currently points to (which may be an invalid position) */
        public TensorAddress address() { return indexes.toAddress(); }

        /** Rewind this iterator to the first element */
        public void reset() {
            this.count = 0;
            // Indexes instances are stateful, so rewinding requires creating a fresh one at the start address
            this.indexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, iterateDimensions, address);
        }

        @Override
        public boolean hasNext() {
            return count < indexes.size();
        }

        /** Returns the next cell, which is valid until next() is called again */
        @Override
        public Cell next() {
            if ( ! hasNext()) throw new NoSuchElementException("No cell at " + indexes);
            count++;
            indexes.next();
            reusedCell.value = get(indexes.toSourceValueIndex());
            return reusedCell;
        }

    }

    /** A Cell which does not compute its TensorAddress unless it really has to */
    private final static class LazyCell extends Tensor.Cell {

        private double value;
        private Indexes indexes;

        private LazyCell(Indexes indexes, Double value) {
            super(null, value); // the address (key) is derived lazily from indexes instead
            this.indexes = indexes;
        }

        @Override
        long getDirectIndex() { return indexes.toIterationValueIndex(); }

        @Override
        public TensorAddress getKey() { return indexes.toAddress(); }

        @Override
        public Double getValue() { return value; }

        /** Returns an immutable snapshot of this cell, safe to hold past the next iterator advance */
        @Override
        public Cell detach() {
            return new Cell(getKey(), value);
        }

    }

    /**
     * An array of indexes into this tensor which are able to find the next index in the value order.
     * next() can be called once per element in the dimensions we iterate over. It must be called once
     * before accessing the first position.
 */
    public abstract static class Indexes {

        private final DimensionSizes sourceSizes;

        private final DimensionSizes iterationSizes;

        protected final long[] indexes;

        /**
         * Create indexes from a type containing bound indexed dimensions only.
         *
         * @throws IllegalStateException if the type contains dimensions which are not bound and indexed
         */
        public static Indexes of(TensorType type) {
            return of(DimensionSizes.of(type));
        }

        public static Indexes of(TensorType type, List<String> iterateDimensionOrder) {
            return of(DimensionSizes.of(type), toIterationOrder(iterateDimensionOrder, type));
        }

        public static Indexes of(DimensionSizes sizes) {
            return of(sizes, sizes);
        }

        private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes) {
            return of(sourceSizes, iterateSizes, completeIterationOrder(iterateSizes.dimensions()));
        }

        private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long size) {
            return of(sourceSizes, iterateSizes, completeIterationOrder(iterateSizes.dimensions()), size);
        }

        private static Indexes of(DimensionSizes sizes, List<Integer> iterateDimensions) {
            return of(sizes, sizes, iterateDimensions);
        }

        private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions) {
            return of(sourceSizes, iterateSizes, iterateDimensions, computeSize(iterateSizes, iterateDimensions));
        }

        private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes,
                                  List<Integer> iterateDimensions, long size) {
            return of(sourceSizes, iterateSizes, iterateDimensions, new long[iterateSizes.dimensions()], size);
        }

        private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes,
                                  List<Integer> iterateDimensions, long[] initialIndexes) {
            return of(sourceSizes, iterateSizes, iterateDimensions, initialIndexes, computeSize(iterateSizes, iterateDimensions));
        }

        // Dispatches to the most specialized implementation applicable to this iteration space
        private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes,
                                  List<Integer> iterateDimensions, long[] initialIndexes, long size) {
            if (size == 0) {
                return new EmptyIndexes(sourceSizes, iterateSizes, initialIndexes);
            }
            else if (size == 1) {
                return new SingleValueIndexes(sourceSizes, iterateSizes, initialIndexes);
            }
            else if (iterateDimensions.size() == 1) {
                if (sourceSizes.equals(iterateSizes))
                    return new EqualSizeSingleDimensionIndexes(sourceSizes, iterateDimensions.get(0), initialIndexes, size);
                else
                    return new SingleDimensionIndexes(sourceSizes, iterateSizes, iterateDimensions.get(0), initialIndexes, size);
            }
            else {
                if (sourceSizes.equals(iterateSizes))
                    return new EqualSizeMultiDimensionIndexes(sourceSizes, iterateDimensions, initialIndexes, size);
                else
                    return new MultiDimensionIndexes(sourceSizes, iterateSizes, iterateDimensions, initialIndexes, size);
            }
        }

        /** Translates dimension names to iteration-order dimension indexes; null means the complete default order. */
        private static List<Integer> toIterationOrder(List<String> dimensionNames, TensorType type) {
            if (dimensionNames == null) return completeIterationOrder(type.rank());

            List<Integer> iterationDimensions = new ArrayList<>(type.rank());
            // NOTE(review): assumes dimensionNames has exactly type.rank() entries and every name exists in type — confirm callers
            for (int i = 0; i < type.rank(); i++)
                iterationDimensions.add(type.rank() - 1 - type.indexOfDimension(dimensionNames.get(i)).get());
            return iterationDimensions;
        }

        /** Since the right dimensions binds closest, iteration order is the opposite of the tensor order */
        private static List<Integer> completeIterationOrder(int length) {
            List<Integer> iterationDimensions = new ArrayList<>(length);
            for (int i = 0; i < length; i++)
                iterationDimensions.add(length - 1 - i);
            return iterationDimensions;
        }

        private Indexes(DimensionSizes sourceSizes, DimensionSizes iterationSizes, long[] indexes) {
            this.sourceSizes = sourceSizes;
            this.iterationSizes = iterationSizes;
            this.indexes = indexes;
        }

        /** Returns the product of the sizes of the dimensions iterated over: the total iteration count. */
        private static long computeSize(DimensionSizes sizes, List<Integer> iterateDimensions) {
            long size = 1;
            for (int iterateDimension : iterateDimensions)
                size *= sizes.size(iterateDimension);
            return size;
        }

        /** Returns the address of the current position of these indexes */
        public TensorAddress toAddress() {
            return TensorAddress.of(indexes);
        }

        public long[] indexesCopy() {
            return Arrays.copyOf(indexes, indexes.length);
        }

        /** Returns a copy of the indexes of this which must not be modified */
        public long[] indexesForReading() { return indexes; }

        public long toSourceValueIndex() {
            return IndexedTensor.toValueIndex(indexes, sourceSizes);
        }

        long toIterationValueIndex() { return IndexedTensor.toValueIndex(indexes, iterationSizes); }

        DimensionSizes dimensionSizes() { return iterationSizes; }

        /** Returns an immutable list containing a copy of the indexes in this */
        public List<Long> toList() {
            ImmutableList.Builder<Long> builder = new ImmutableList.Builder<>();
            for (long index : indexes)
                builder.add(index);
            return builder.build();
        }

        @Override
        public String toString() {
            return "indexes " + Arrays.toString(indexes);
        }

        public abstract long size();

        public abstract void next();

        /** Returns whether further values are available by calling next() */
        public abstract boolean hasNext();

        /** Returns the number of dimensions in iteration order which are currently at the start position (0) */
        abstract int nextDimensionsAtStart();

        /** Returns the number of dimensions in iteration order which are currently at their end position */
        abstract int nextDimensionsAtEnd();

    }

    /** Indexes over a space with zero cells: hasNext is always false. */
    private final static class EmptyIndexes extends Indexes {

        private EmptyIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long[] indexes) {
            super(sourceSizes, iterateSizes, indexes);
        }

        @Override
        public long size() { return 0; }

        @Override
        public void next() {}

        @Override
        public boolean hasNext() { return false; }

        @Override
        int nextDimensionsAtStart() { return 0; }

        @Override
        int nextDimensionsAtEnd() { return 0; }

    }

    /** Indexes over a space with exactly one cell: the first next() exhausts it. */
    private final static class SingleValueIndexes extends Indexes {

        private boolean exhausted = false;

        private SingleValueIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long[] indexes) {
            super(sourceSizes, iterateSizes, indexes);
        }

        @Override
        public long size() { return 1; }

        @Override
        public void next() { exhausted = true; }

        @Override
        public boolean hasNext() { return ! exhausted; }

        @Override
        int nextDimensionsAtStart() { return 1; }

        @Override
        int nextDimensionsAtEnd() { return 1; }

    }

    /** General case: multiple iterated dimensions, iteration sizes possibly smaller than the source sizes. */
    private static class MultiDimensionIndexes extends Indexes {

        private final long size;

        private final List<Integer> iterateDimensions;

        private MultiDimensionIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes,
                                      List<Integer> iterateDimensions, long[] initialIndexes, long size) {
            super(sourceSizes, iterateSizes, initialIndexes);
            this.iterateDimensions = iterateDimensions;
            this.size = size;

            // Start at the (virtual) position just before the first cell, so the first next() lands on it
            indexes[iterateDimensions.get(0)]--;
        }

        /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */
        @Override
        public long size() {
            return size;
        }

        /**
         * Advances this to the next cell in the standard indexed tensor cell order.
         * The first call to this will put it at the first position.
         *
         * @throws RuntimeException if this is called when hasNext returns false
         */
        @Override
        public void next() {
            int iterateDimensionsIndex = 0;
            // Ripple-carry: reset each exhausted dimension to 0 and advance the next one in iteration order
            while ( indexes[iterateDimensions.get(iterateDimensionsIndex)] + 1 == dimensionSizes().size(iterateDimensions.get(iterateDimensionsIndex))) {
                indexes[iterateDimensions.get(iterateDimensionsIndex)] = 0;
                iterateDimensionsIndex++;
            }
            indexes[iterateDimensions.get(iterateDimensionsIndex)]++;
        }

        @Override
        public boolean hasNext() {
            // There is a next position if any iterated dimension has room to advance
            for (int iterateDimension : iterateDimensions) {
                if (indexes[iterateDimension] + 1 < dimensionSizes().size(iterateDimension))
                    return true;
            }
            return false;
        }

        @Override
        int nextDimensionsAtStart() {
            int dimension = 0;
            while (dimension < iterateDimensions.size() && indexes[iterateDimensions.get(dimension)] == 0)
                dimension++;
            return dimension;
        }

        @Override
        int nextDimensionsAtEnd() {
            int dimension = 0;
            while (dimension < iterateDimensions.size() && indexes[iterateDimensions.get(dimension)] == dimensionSizes().size(iterateDimensions.get(dimension)) - 1)
                dimension++;
            return dimension;
        }

    }

    /** In this
case we can reuse the source index computation for the iteration index */
    private final static class EqualSizeMultiDimensionIndexes extends MultiDimensionIndexes {

        private long lastComputedSourceValueIndex = -1;

        private EqualSizeMultiDimensionIndexes(DimensionSizes sizes, List<Integer> iterateDimensions,
                                               long[] initialIndexes, long size) {
            super(sizes, sizes, iterateDimensions, initialIndexes, size);
        }

        @Override
        public long toSourceValueIndex() { return lastComputedSourceValueIndex = super.toSourceValueIndex(); }

        // NOTE(review): returns the value cached by the most recent toSourceValueIndex() call (-1 if never called);
        // callers must invoke toSourceValueIndex() first for this to be current — confirm all call sites do
        @Override
        long toIterationValueIndex() { return lastComputedSourceValueIndex; }

    }

    /** In this case we can keep track of indexes using a step instead of using the more elaborate computation */
    private final static class SingleDimensionIndexes extends Indexes {

        private final long size;

        private final int iterateDimension;

        /** Maintain this directly as an optimization for 1-d iteration */
        private long currentSourceValueIndex, currentIterationValueIndex;

        /** The iteration step in the value index space */
        private final long sourceStep, iterationStep;

        private SingleDimensionIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes,
                                       int iterateDimension, long[] initialIndexes, long size) {
            super(sourceSizes, iterateSizes, initialIndexes);
            this.iterateDimension = iterateDimension;
            this.size = size;
            this.sourceStep = productOfDimensionsAfter(iterateDimension, sourceSizes);
            this.iterationStep = productOfDimensionsAfter(iterateDimension, iterateSizes);

            // Start at the (virtual) position just before the first cell, so the first next() lands on it
            indexes[iterateDimension]--;
            currentSourceValueIndex = IndexedTensor.toValueIndex(indexes, sourceSizes);
            currentIterationValueIndex = IndexedTensor.toValueIndex(indexes, iterateSizes);
        }

        /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */
        @Override
        public long size() {
            return size;
        }

        /**
         * Advances this to the next cell in the standard indexed tensor cell order.
         * The first call to this will put it at the first position.
         *
         * @throws RuntimeException if this is called when hasNext returns false
         */
        @Override
        public void next() {
            indexes[iterateDimension]++;
            currentSourceValueIndex += sourceStep;
            currentIterationValueIndex += iterationStep;
        }

        @Override
        public long toSourceValueIndex() { return currentSourceValueIndex; }

        @Override
        long toIterationValueIndex() { return currentIterationValueIndex; }

        @Override
        public boolean hasNext() {
            return indexes[iterateDimension] + 1 < size;
        }

        // NOTE(review): these compare a value index against 0/size - 1, which only coincides with the
        // first/last iteration position when the step is 1 — verify callers for iterate dimensions with step > 1
        @Override
        int nextDimensionsAtStart() { return currentSourceValueIndex == 0 ? 1 : 0; }

        @Override
        int nextDimensionsAtEnd() { return currentSourceValueIndex == size - 1 ? 1 : 0; }

    }

    /** In this case we only need to keep track of one index */
    private final static class EqualSizeSingleDimensionIndexes extends Indexes {

        private final long size;

        private final int iterateDimension;

        /** Maintain this directly as an optimization for 1-d iteration */
        private long currentValueIndex;

        /** The iteration step in the value index space */
        private final long step;

        private EqualSizeSingleDimensionIndexes(DimensionSizes sizes,
                                                int iterateDimension, long[] initialIndexes, long size) {
            super(sizes, sizes, initialIndexes);
            this.iterateDimension = iterateDimension;
            this.size = size;
            this.step = productOfDimensionsAfter(iterateDimension, sizes);

            // Start at the (virtual) position just before the first cell, so the first next() lands on it
            indexes[iterateDimension]--;
            currentValueIndex = IndexedTensor.toValueIndex(indexes, sizes);
        }

        /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */
        @Override
        public long size() {
            return size;
        }

        /**
         * Advances this to the next cell in the standard indexed tensor cell order.
         * The first call to this will put it at the first position.
         *
         * @throws RuntimeException if this is called when hasNext returns false
         */
        @Override
        public void next() {
            indexes[iterateDimension]++;
            currentValueIndex += step;
        }

        @Override
        public boolean hasNext() {
            return indexes[iterateDimension] + 1 < size;
        }

        @Override
        public long toSourceValueIndex() { return currentValueIndex; }

        @Override
        long toIterationValueIndex() { return currentValueIndex; }

        // NOTE(review): as in SingleDimensionIndexes, these equate a value index with an iteration count;
        // exact only when step == 1 — confirm intended for other steps
        @Override
        int nextDimensionsAtStart() { return currentValueIndex == 0 ? 1 : 0; }

        @Override
        int nextDimensionsAtEnd() { return currentValueIndex == size - 1 ? 1 : 0; }

    }

}
// NOTE(review): from here this chunk repeats members already present above (CellIterator, SubspaceIterator,
// Indexes, ...) under a second `class IndexedTensor` declaration — looks like an accidental duplication of the
// class in this file; two same-named top-level classes cannot compile. Confirm and deduplicate.
class IndexedTensor implements Tensor {

    /** The prescribed and possibly abstract type this is an instance of */
    private final TensorType type;

    /** The sizes of the dimensions of this in the order of the dimensions of the type */
    private final DimensionSizes dimensionSizes;

    IndexedTensor(TensorType type, DimensionSizes dimensionSizes) {
        this.type = type;
        this.dimensionSizes = dimensionSizes;
    }

    /**
     * Returns an iterator over the cells of this in the <i>standard value order</i>.
     */
    @Override
    public Iterator<Cell> cellIterator() {
        return new CellIterator();
    }

    /** Returns an iterator over all the cells in this tensor which matches the given partial address */
    public SubspaceIterator cellIterator(PartialAddress partialAddress, DimensionSizes iterationSizes) {
        long[] startAddress = new long[type().dimensions().size()];
        List<Integer> iterateDimensions = new ArrayList<>();
        for (int i = 0; i < type().dimensions().size(); i++) {
            long partialAddressLabel = partialAddress.numericLabel(type.dimensions().get(i).name());
            if (partialAddressLabel >= 0) // this dimension is pinned by the partial address
                startAddress[i] = partialAddressLabel;
            else // not given in the partial address: iterate over this dimension
                iterateDimensions.add(i);
        }
        return new SubspaceIterator(iterateDimensions, startAddress, iterationSizes);
    }

    /** Returns an iterator over the values of this returned in the <i>standard value order</i> */
    @Override
    public Iterator<Double> valueIterator() {
        return new ValueIterator();
    }

    /**
     * Returns an iterator over value iterators where the outer iterator is over each unique value of the dimensions
     * given and the inner iterator is over each unique value of the rest of the dimensions, in the
     * <i>standard value order</i>
     *
     * @param dimensions the names of the dimensions of the superspace
     * @param sizes the size of each dimension in the space we are returning values for, containing
     *              one value per dimension of this tensor (in order). Each size may be the same or smaller
     *              than the corresponding size of this tensor
     */
    public Iterator<SubspaceIterator> subspaceIterator(Set<String> dimensions, DimensionSizes sizes) {
        return new SuperspaceIterator(dimensions, sizes);
    }

    /** Returns a subspace iterator having the sizes of the dimensions of this tensor */
    public Iterator<SubspaceIterator> subspaceIterator(Set<String> dimensions) {
        return subspaceIterator(dimensions, dimensionSizes);
    }

    /**
     * Returns the value at the given indexes as a double
     *
     * @param indexes the indexes into the dimensions of this. Must be one number per dimension of this
     * @throws IllegalArgumentException if any of the indexes are out of bound or a wrong number of indexes are given
     */
    public double get(long ... indexes) {
        return get((int)toValueIndex(indexes, dimensionSizes));
    }

    /**
     * Returns the value at the given indexes as a float
     *
     * @param indexes the indexes into the dimensions of this. Must be one number per dimension of this
     * @throws IllegalArgumentException if any of the indexes are out of bound or a wrong number of indexes are given
     */
    public float getFloat(long ... indexes) {
        return getFloat((int)toValueIndex(indexes, dimensionSizes));
    }

    /** Returns the value at this address, or NaN if there is no value at this address */
    @Override
    public double get(TensorAddress address) {
        try {
            return get((int)toValueIndex(address, dimensionSizes, type));
        }
        catch (IllegalArgumentException e) {
            return Double.NaN; // out of bounds: no value at this address
        }
    }

    /**
     * Returns the value at the given <i>standard value order</i> index as a double.
     *
     * @param valueIndex the direct index into the underlying data.
     * @throws IllegalArgumentException if index is out of bounds
     */
    public abstract double get(long valueIndex);

    /**
     * Returns the value at the given <i>standard value order</i> index as a float.
     *
     * @param valueIndex the direct index into the underlying data.
     * @throws IllegalArgumentException if index is out of bounds
     */
    public abstract float getFloat(long valueIndex);

    /** Converts per-dimension indexes into a single <i>standard value order</i> index. */
    static long toValueIndex(long[] indexes, DimensionSizes sizes) {
        // NOTE(review): the 1-d fast path returns without the bounds check done in the loop below,
        // so an out-of-range 1-d index is not rejected here — confirm this is intentional for speed
        if (indexes.length == 1) return indexes[0];
        if (indexes.length == 0) return 0;

        long valueIndex = 0;
        for (int i = 0; i < indexes.length; i++) {
            if (indexes[i] >= sizes.size(i))
                throw new IllegalArgumentException(Arrays.toString(indexes) + " are not within bounds");
            valueIndex += productOfDimensionsAfter(i, sizes) * indexes[i];
        }
        return valueIndex;
    }

    /** Converts a (fully numeric) address into a single <i>standard value order</i> index. */
    static long toValueIndex(TensorAddress address, DimensionSizes sizes, TensorType type) {
        if (address.isEmpty()) return 0;

        long valueIndex = 0;
        for (int i = 0; i < address.size(); i++) {
            if (address.numericLabel(i) >= sizes.size(i))
                throw new IllegalArgumentException(address + " is not within the bounds of " + type);
            valueIndex += productOfDimensionsAfter(i, sizes) * address.numericLabel(i);
        }
        return valueIndex;
    }

    /** Returns the product of the sizes of all dimensions after the given one: the stride of that dimension. */
    private static long productOfDimensionsAfter(int afterIndex, DimensionSizes sizes) {
        long product = 1;
        for (int i = afterIndex + 1; i < sizes.dimensions(); i++)
            product *= sizes.size(i);
        return product;
    }

    void throwOnIncompatibleType(TensorType type) {
        if ( ! this.type().isRenamableTo(type))
            throw new IllegalArgumentException("Can not change type from " + this.type() + " to " + type +
                                               ": Types are not compatible");
    }

    @Override
    public TensorType type() { return type; }

    @Override
    public abstract IndexedTensor withType(TensorType type);

    public DimensionSizes dimensionSizes() { return dimensionSizes; }

    @Override
    public Map<TensorAddress, Double> cells() {
        if (dimensionSizes.dimensions() == 0)
            return Collections.singletonMap(TensorAddress.of(), get(0));

        ImmutableMap.Builder<TensorAddress, Double> builder = new ImmutableMap.Builder<>();
        Indexes indexes = Indexes.of(dimensionSizes, dimensionSizes, size());
        for (long i = 0; i < size(); i++) {
            indexes.next();
            builder.put(indexes.toAddress(), get(i));
        }
        return builder.build();
    }

    @Override
    public Tensor remove(Set<TensorAddress> addresses) {
        throw new IllegalArgumentException("Remove is not supported for indexed tensors");
    }

    @Override
    public String toString() {
        // Fall back to the standard (sparse) string form for rank 0 and for unbound dimensions
        if (type.rank() == 0) return Tensor.toStandardString(this);
        if (type.dimensions().stream().anyMatch(d -> d.size().isEmpty())) return Tensor.toStandardString(this);

        Indexes indexes = Indexes.of(dimensionSizes);
        StringBuilder b = new StringBuilder(type.toString()).append(":");
        indexedBlockToString(this, indexes, b);
        return b.toString();
    }

    // NOTE(review): equals is overridden but no matching hashCode is visible in this chunk —
    // confirm hashCode is defined elsewhere so the equals/hashCode contract holds
    @Override
    public boolean equals(Object other) {
        if ( ! ( other instanceof Tensor)) return false;
        return Tensor.equals(this, ((Tensor)other));
    }

    public abstract static class Builder implements Tensor.Builder {

        final TensorType type;

        private Builder(TensorType type) {
            this.type = type;
        }

        public static Builder of(TensorType type) {
            // Bound in every dimension: sizes are known up front, use the direct-array builder
            if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
                return of(type, BoundBuilder.dimensionSizesOf(type));
            else
                return new UnboundBuilder(type);
        }

        /**
         * Creates a builder initialized with the given values
         *
         * @param type the type of the tensor to build
         * @param values the initial values of the tensor.
This <b>transfers ownership</b> of the value array - it
         *               must not be further mutated by the caller
         */
        public static Builder of(TensorType type, float[] values) {
            if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
                return of(type, BoundBuilder.dimensionSizesOf(type), values);
            else
                return new UnboundBuilder(type); // NOTE(review): the given values are silently dropped on this branch — confirm intended

        }

        /**
         * Creates a builder initialized with the given values
         *
         * @param type the type of the tensor to build
         * @param values the initial values of the tensor. This <b>transfers ownership</b> of the value array - it
         *               must not be further mutated by the caller
         */
        public static Builder of(TensorType type, double[] values) {
            if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
                return of(type, BoundBuilder.dimensionSizesOf(type), values);
            else
                return new UnboundBuilder(type); // NOTE(review): the given values are silently dropped on this branch — confirm intended

        }

        /**
         * Create a builder with dimension size information for this instance. Must be one size entry per dimension,
         * and, agree with the type size information when specified in the type.
         * If sizes are completely specified in the type this size information is redundant.
         */
        public static Builder of(TensorType type, DimensionSizes sizes) {
            validate(type, sizes);

            switch (type.valueType()) {
                case DOUBLE: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
                case FLOAT: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes);
                case BFLOAT16: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes); // handled by the float builder
                case INT8: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes); // handled by the float builder
                default: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
            }
        }

        /**
         * Creates a builder initialized with the given values
         *
         * @param type the type of the tensor to build
         * @param values the initial values of the tensor in the <i>standard value order</i>.
         *               This <b>transfers ownership</b> of the value array - it
         *               must not be further mutated by the caller
         */
        public static Builder of(TensorType type, DimensionSizes sizes, float[] values) {
            validate(type, sizes);
            validateSizes(sizes, values.length);

            switch (type.valueType()) {
                case DOUBLE: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes).fill(values); // converts to double
                case FLOAT: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes, values); // takes the array directly
                case BFLOAT16: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes, values);
                case INT8: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes, values);
                default: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes).fill(values);
            }
        }

        /**
         * Creates a builder initialized with the given values
         *
         * @param type the type of the tensor to build
         * @param values the initial values of the tensor in the <i>standard value order</i>.
         *               This <b>transfers ownership</b> of the value array - it
         *               must not be further mutated by the caller
         */
        public static Builder of(TensorType type, DimensionSizes sizes, double[] values) {
            validate(type, sizes);
            validateSizes(sizes, values.length);

            switch (type.valueType()) {
                case DOUBLE: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes, values); // takes the array directly
                case FLOAT: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values); // converts to float
                case BFLOAT16: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values);
                case INT8: return new IndexedFloatTensor.BoundFloatBuilder(type, sizes).fill(values);
                default: return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes, values);
            }
        }

        /** Verifies that the supplied value array has exactly one value per cell of the given sizes. */
        private static void validateSizes(DimensionSizes sizes, int length) {
            if (sizes.totalSize() != length) {
                throw new IllegalArgumentException("Invalid size(" + length + ") of supplied value vector." +
                                                   " Type specifies that size should be " + sizes.totalSize());
            }
        }

        /** Verifies that the given sizes agree with the (possibly partially specified) sizes of the type. */
        private static void validate(TensorType type, DimensionSizes sizes) {
            if (sizes.dimensions() != type.dimensions().size())
                throw new IllegalArgumentException(sizes.dimensions() + " is the wrong number of dimensions for " + type);
            for (int i = 0; i < sizes.dimensions(); i++ ) {
                Optional<Long> size = type.dimensions().get(i).size();
                if (size.isPresent() && size.get() < sizes.size(i))
                    throw new IllegalArgumentException("Size of dimension " + type.dimensions().get(i).name() + " is " +
                                                       sizes.size(i) + " but cannot be larger than " + size.get() + " in " + type);
            }
        }

        public abstract Builder cell(double value, long ... indexes);

        public abstract Builder cell(float value, long ... indexes);

        @Override
        public TensorType type() { return type; }

        @Override
        public abstract IndexedTensor build();

    }

    /** Builder access for writing values directly by <i>standard value order</i> index. */
    public interface DirectIndexBuilder {

        TensorType type();

        /** Sets a value by its <i>standard value order</i> index */
        void cellByDirectIndex(long index, double value);

        /** Sets a value by its <i>standard value order</i> index */
        void cellByDirectIndex(long index, float value);

    }

    /** A bound builder can create the double array directly */
    public static abstract class BoundBuilder extends Builder implements DirectIndexBuilder {

        private DimensionSizes sizes;

        private static DimensionSizes dimensionSizesOf(TensorType type) {
            DimensionSizes.Builder b = new DimensionSizes.Builder(type.dimensions().size());
            for (int i = 0; i < type.dimensions().size(); i++)
                b.set(i, type.dimensions().get(i).size().get());
            return b.build();
        }

        BoundBuilder(TensorType type, DimensionSizes sizes) {
            super(type);
            if ( sizes.dimensions() != type.dimensions().size())
                throw new IllegalArgumentException("Must have a dimension size entry for each dimension in " + type);
            this.sizes = sizes;
        }

        /** Fills this with the given values, in the <i>standard value order</i>. */
        BoundBuilder fill(float[] values) {
            long index = 0;
            for (float value : values) {
                cellByDirectIndex(index++, value);
            }
            return this;
        }

        /** Fills this with the given values, in the <i>standard value order</i>. */
        BoundBuilder fill(double[] values) {
long index = 0;
            for (double value : values) {
                cellByDirectIndex(index++, value);
            }
            return this;
        }

        DimensionSizes sizes() { return sizes; }

    }

    /**
     * A builder used when we don't know the size of the dimensions up front.
     * All values in all dimensions must be specified.
     */
    private static class UnboundBuilder extends Builder {

        /** List of List or Double */
        private List<Object> firstDimension = null;

        private UnboundBuilder(TensorType type) {
            super(type);
        }

        @Override
        public IndexedTensor build() {
            if (firstDimension == null) throw new IllegalArgumentException("Tensor of type " + type() + " has no values");

            if (type.dimensions().isEmpty()) // rank 0: a single value
                return new IndexedDoubleTensor(type, new DimensionSizes.Builder(type.dimensions().size()).build(), new double[] {(Double) firstDimension.get(0) });

            // Derive the dimension sizes from the extent of the nested value lists
            DimensionSizes dimensionSizes = findDimensionSizes(firstDimension);
            double[] values = new double[(int)dimensionSizes.totalSize()];
            fillValues(0, 0, firstDimension, dimensionSizes, values);
            return new IndexedDoubleTensor(type, dimensionSizes, values);
        }

        private DimensionSizes findDimensionSizes(List<Object> firstDimension) {
            List<Long> dimensionSizeList = new ArrayList<>(type.dimensions().size());
            findDimensionSizes(0, dimensionSizeList, firstDimension);
            DimensionSizes.Builder b = new DimensionSizes.Builder(type.dimensions().size());
            for (int i = 0; i < b.dimensions(); i++) {
                if (i < dimensionSizeList.size())
                    b.set(i, dimensionSizeList.get(i));
            }
            return b.build();
        }

        /** Recursively records (or cross-checks) the size of each dimension from the nested value lists. */
        @SuppressWarnings("unchecked")
        private void findDimensionSizes(int currentDimensionIndex, List<Long> dimensionSizes, List<Object> currentDimension) {
            if (currentDimensionIndex == dimensionSizes.size())
                dimensionSizes.add((long)currentDimension.size());
            else if (dimensionSizes.get(currentDimensionIndex) != currentDimension.size())
                throw new IllegalArgumentException("Missing values in dimension " + type.dimensions().get(currentDimensionIndex) + " in " + type);

            for (Object value : currentDimension)
                if (value instanceof List)
                    findDimensionSizes(currentDimensionIndex + 1, dimensionSizes, (List<Object>)value);
        }

        /** Recursively flattens the nested value lists into the <i>standard value order</i> array. */
        @SuppressWarnings("unchecked")
        private void fillValues(int currentDimensionIndex, long offset, List<Object> currentDimension,
                                DimensionSizes sizes, double[] values) {
            if (currentDimensionIndex < sizes.dimensions() - 1) { // not at the innermost dimension: recurse
                for (long i = 0; i < currentDimension.size(); i++)
                    fillValues(currentDimensionIndex + 1,
                               offset + productOfDimensionsAfter(currentDimensionIndex, sizes) * i,
                               (List<Object>) currentDimension.get((int)i), sizes, values);
            }
            else { // innermost dimension: write the values
                for (long i = 0; i < currentDimension.size(); i++) {
                    values[(int)(offset + i)] = nullAsZero((Double)currentDimension.get((int)i));
                }
            }
        }

        /** Unset cells (nulls in the nested lists) become 0. */
        private double nullAsZero(Double value) {
            if (value == null) return 0;
            return value;
        }

        @Override
        public CellBuilder cell() {
            return new CellBuilder(type, this);
        }

        @Override
        public Builder cell(TensorAddress address, float value) {
            return cell(address, (double)value);
        }

        @Override
        public Builder cell(TensorAddress address, double value) {
            long[] indexes = new long[address.size()];
            for (int i = 0; i < address.size(); i++) {
                indexes[i] = address.numericLabel(i);
            }
            cell(value, indexes);
            return this;
        }

        @Override
        public Builder cell(float value, long... indexes) {
            return cell((double)value, indexes);
        }

        /**
         * Set a value using an index API. The number of indexes must be the same as the dimensions in the type of this.
         * Values can be written in any order but all values needed to make this dense must be provided
         * before building this.
         *
         * @return this for chaining
         */
        @SuppressWarnings("unchecked")
        @Override
        public Builder cell(double value, long... indexes) {
            if (indexes.length != type.dimensions().size())
                throw new IllegalArgumentException("Wrong number of indexes (" + indexes.length + ") for " + type);

            if (indexes.length == 0) { // rank 0: store the single value directly
                firstDimension = Collections.singletonList(value);
                return this;
            }

            if (firstDimension == null)
                firstDimension = new ArrayList<>();
            // Walk/extend the nested lists one dimension at a time, creating sublists on demand
            List<Object> currentValues = firstDimension;
            for (int dimensionIndex = 0; dimensionIndex < indexes.length; dimensionIndex++) {
                ensureCapacity(indexes[dimensionIndex], currentValues);
                if (dimensionIndex == indexes.length - 1) { // innermost dimension: set the value
                    currentValues.set((int)indexes[dimensionIndex], value);
                }
                else {
                    if (currentValues.get((int)indexes[dimensionIndex]) == null)
                        currentValues.set((int)indexes[dimensionIndex], new ArrayList<>());
                    currentValues = (List<Object>) currentValues.get((int)indexes[dimensionIndex]);
                }
            }
            return this;
        }

        /** Fill the given list with nulls if necessary to make sure it has a (possibly null) value at the given index */
        private void ensureCapacity(long index, List<Object> list) {
            while (list.size() <= index)
                list.add(list.size(), null);
        }

    }

    /** Iterates all cells of this tensor in the standard value order, reusing one LazyCell. */
    private final class CellIterator implements Iterator<Cell> {

        private long count = 0;
        private final Indexes indexes = Indexes.of(dimensionSizes, dimensionSizes, size());
        private final LazyCell reusedCell = new LazyCell(indexes, Double.NaN);

        @Override
        public boolean hasNext() {
            return count < indexes.size();
        }

        @Override
        public Cell next() {
            if ( !
hasNext()) throw new NoSuchElementException("No cell at " + indexes);
            count++;
            indexes.next();
            // Mutates and returns the same LazyCell instance: the returned Cell is only valid until next() is called again
            reusedCell.value = get(indexes.toSourceValueIndex());
            return reusedCell;
        }

    }

    /** Iterates over all values of this tensor by direct value index, in the standard value order. */
    private final class ValueIterator implements Iterator<Double> {

        private long count = 0;

        @Override
        public boolean hasNext() { return count < size(); }

        @Override
        public Double next() {
            try {
                return get(count++);
            }
            catch (IllegalArgumentException e) {
                throw new NoSuchElementException("No element at position " + count);
            }
        }

    }

    /** Iterates the positions of the given superdimensions, yielding a SubspaceIterator over the rest for each. */
    private final class SuperspaceIterator implements Iterator<SubspaceIterator> {

        private final Indexes superindexes;

        /** The indexes this should iterate over */
        private final List<Integer> subdimensionIndexes;

        /**
         * The sizes of the space we'll return values of, one value for each dimension of this tensor,
         * which may be equal to or smaller than the sizes of this tensor
         */
        private final DimensionSizes iterateSizes;

        private long count = 0;

        private SuperspaceIterator(Set<String> superdimensionNames, DimensionSizes iterateSizes) {
            this.iterateSizes = iterateSizes;

            List<Integer> superdimensionIndexes = new ArrayList<>(superdimensionNames.size());
            subdimensionIndexes = new ArrayList<>(superdimensionNames.size());
            // Collected in reverse type order (see completeIterationOrder: the rightmost dimension binds closest)
            for (int i = type.dimensions().size() - 1; i >= 0; i-- ) {
                if (superdimensionNames.contains(type.dimensions().get(i).name()))
                    superdimensionIndexes.add(i);
                else
                    subdimensionIndexes.add(i);
            }

            superindexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, superdimensionIndexes);
        }

        @Override
        public boolean hasNext() { return count < superindexes.size(); }

        @Override
        public SubspaceIterator next() {
            if ( ! hasNext()) throw new NoSuchElementException("No cell at " + superindexes);
            count++;
            superindexes.next();
            // Each subspace starts at a copy of the current superspace position
            return new SubspaceIterator(subdimensionIndexes, superindexes.indexesCopy(), iterateSizes);
        }

    }

    /**
     * An iterator over a subspace of this tensor. This is exposed to allow clients to query the size.
     * NOTE THAT the Cell returned by next is only valid until the next() call is made.
     * This is a concession to performance due to this typically being used in inner loops.
     */
    public final class SubspaceIterator implements Iterator<Tensor.Cell> {

        /**
         * This iterator will iterate over the given dimensions, in the order given
         * (the first dimension index given is incremented to exhaustion first (i.e is etc.).
         * This may be any subset of the dimensions given by address and dimensionSizes.
         */
        private final List<Integer> iterateDimensions;
        private final long[] address;
        private final DimensionSizes iterateSizes;

        private Indexes indexes;
        private long count = 0;

        /** A lazy cell for reuse */
        private final LazyCell reusedCell;

        /**
         * Creates a new subspace iterator
         *
         * @param iterateDimensions the dimensions to iterate over, given as indexes in the dimension order of the
         *                          type of the tensor this iterates over. This iterator will iterate over these
         *                          dimensions to exhaustion in the order given (the first dimension index given is
         *                          incremented to exhaustion first etc., while other dimensions will be held
         *                          at a constant position.
         *                          This may be any subset of the dimensions given by address and dimensionSizes.
         *                          This is treated as immutable.
         * @param address the address of the first cell of this subspace.
         */
        private SubspaceIterator(List<Integer> iterateDimensions, long[] address, DimensionSizes iterateSizes) {
            this.iterateDimensions = iterateDimensions;
            this.address = address;
            this.iterateSizes = iterateSizes;
            this.indexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, iterateDimensions, address);
            reusedCell = new LazyCell(indexes, Double.NaN);
        }

        /** Returns the total number of cells in this subspace */
        public long size() {
            return indexes.size();
        }

        /** Returns the address of the cell this currently points to (which may be an invalid position) */
        public TensorAddress address() { return indexes.toAddress(); }

        /** Rewind this iterator to the first element */
        public void reset() {
            this.count = 0;
            // Indexes instances are stateful, so rewinding requires creating a fresh one at the start address
            this.indexes = Indexes.of(IndexedTensor.this.dimensionSizes, iterateSizes, iterateDimensions, address);
        }

        @Override
        public boolean hasNext() {
            return count < indexes.size();
        }

        /** Returns the next cell, which is valid until next() is called again */
        @Override
        public Cell next() {
            if ( ! hasNext()) throw new NoSuchElementException("No cell at " + indexes);
            count++;
            indexes.next();
            reusedCell.value = get(indexes.toSourceValueIndex());
            return reusedCell;
        }

    }

    /** A Cell which does not compute its TensorAddress unless it really has to */
    private final static class LazyCell extends Tensor.Cell {

        private double value;
        private Indexes indexes;

        private LazyCell(Indexes indexes, Double value) {
            super(null, value); // the address (key) is derived lazily from indexes instead
            this.indexes = indexes;
        }

        @Override
        long getDirectIndex() { return indexes.toIterationValueIndex(); }

        @Override
        public TensorAddress getKey() { return indexes.toAddress(); }

        @Override
        public Double getValue() { return value; }

        /** Returns an immutable snapshot of this cell, safe to hold past the next iterator advance */
        @Override
        public Cell detach() {
            return new Cell(getKey(), value);
        }

    }

    /**
     * An array of indexes into this tensor which are able to find the next index in the value order.
     * next() can be called once per element in the dimensions we iterate over. It must be called once
     * before accessing the first position.
*/ public abstract static class Indexes { private final DimensionSizes sourceSizes; private final DimensionSizes iterationSizes; protected final long[] indexes; /** * Create indexes from a type containing bound indexed dimensions only. * * @throws IllegalStateException if the type contains dimensions which are not bound and indexed */ public static Indexes of(TensorType type) { return of(DimensionSizes.of(type)); } public static Indexes of(TensorType type, List<String> iterateDimensionOrder) { return of(DimensionSizes.of(type), toIterationOrder(iterateDimensionOrder, type)); } public static Indexes of(DimensionSizes sizes) { return of(sizes, sizes); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes) { return of(sourceSizes, iterateSizes, completeIterationOrder(iterateSizes.dimensions())); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long size) { return of(sourceSizes, iterateSizes, completeIterationOrder(iterateSizes.dimensions()), size); } private static Indexes of(DimensionSizes sizes, List<Integer> iterateDimensions) { return of(sizes, sizes, iterateDimensions); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions) { return of(sourceSizes, iterateSizes, iterateDimensions, computeSize(iterateSizes, iterateDimensions)); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long size) { return of(sourceSizes, iterateSizes, iterateDimensions, new long[iterateSizes.dimensions()], size); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long[] initialIndexes) { return of(sourceSizes, iterateSizes, iterateDimensions, initialIndexes, computeSize(iterateSizes, iterateDimensions)); } private static Indexes of(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long[] 
initialIndexes, long size) { if (size == 0) { return new EmptyIndexes(sourceSizes, iterateSizes, initialIndexes); } else if (size == 1) { return new SingleValueIndexes(sourceSizes, iterateSizes, initialIndexes); } else if (iterateDimensions.size() == 1) { if (sourceSizes.equals(iterateSizes)) return new EqualSizeSingleDimensionIndexes(sourceSizes, iterateDimensions.get(0), initialIndexes, size); else return new SingleDimensionIndexes(sourceSizes, iterateSizes, iterateDimensions.get(0), initialIndexes, size); } else { if (sourceSizes.equals(iterateSizes)) return new EqualSizeMultiDimensionIndexes(sourceSizes, iterateDimensions, initialIndexes, size); else return new MultiDimensionIndexes(sourceSizes, iterateSizes, iterateDimensions, initialIndexes, size); } } private static List<Integer> toIterationOrder(List<String> dimensionNames, TensorType type) { if (dimensionNames == null) return completeIterationOrder(type.rank()); List<Integer> iterationDimensions = new ArrayList<>(type.rank()); for (int i = 0; i < type.rank(); i++) iterationDimensions.add(type.rank() - 1 - type.indexOfDimension(dimensionNames.get(i)).get()); return iterationDimensions; } /** Since the right dimensions binds closest, iteration order is the opposite of the tensor order */ private static List<Integer> completeIterationOrder(int length) { List<Integer> iterationDimensions = new ArrayList<>(length); for (int i = 0; i < length; i++) iterationDimensions.add(length - 1 - i); return iterationDimensions; } private Indexes(DimensionSizes sourceSizes, DimensionSizes iterationSizes, long[] indexes) { this.sourceSizes = sourceSizes; this.iterationSizes = iterationSizes; this.indexes = indexes; } private static long computeSize(DimensionSizes sizes, List<Integer> iterateDimensions) { long size = 1; for (int iterateDimension : iterateDimensions) size *= sizes.size(iterateDimension); return size; } /** Returns the address of the current position of these indexes */ public TensorAddress toAddress() { return 
TensorAddress.of(indexes); } public long[] indexesCopy() { return Arrays.copyOf(indexes, indexes.length); } /** Returns a copy of the indexes of this which must not be modified */ public long[] indexesForReading() { return indexes; } public long toSourceValueIndex() { return IndexedTensor.toValueIndex(indexes, sourceSizes); } long toIterationValueIndex() { return IndexedTensor.toValueIndex(indexes, iterationSizes); } DimensionSizes dimensionSizes() { return iterationSizes; } /** Returns an immutable list containing a copy of the indexes in this */ public List<Long> toList() { ImmutableList.Builder<Long> builder = new ImmutableList.Builder<>(); for (long index : indexes) builder.add(index); return builder.build(); } @Override public String toString() { return "indexes " + Arrays.toString(indexes); } public abstract long size(); public abstract void next(); /** Returns whether further values are available by calling next() */ public abstract boolean hasNext(); /** Returns the number of dimensions in iteration order which are currently at the start position (0) */ abstract int nextDimensionsAtStart(); /** Returns the number of dimensions in iteration order which are currently at their end position */ abstract int nextDimensionsAtEnd(); } private final static class EmptyIndexes extends Indexes { private EmptyIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long[] indexes) { super(sourceSizes, iterateSizes, indexes); } @Override public long size() { return 0; } @Override public void next() {} @Override public boolean hasNext() { return false; } @Override int nextDimensionsAtStart() { return 0; } @Override int nextDimensionsAtEnd() { return 0; } } private final static class SingleValueIndexes extends Indexes { private boolean exhausted = false; private SingleValueIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, long[] indexes) { super(sourceSizes, iterateSizes, indexes); } @Override public long size() { return 1; } @Override public void 
next() { exhausted = true; } @Override public boolean hasNext() { return ! exhausted; } @Override int nextDimensionsAtStart() { return 1; } @Override int nextDimensionsAtEnd() { return 1; } } private static class MultiDimensionIndexes extends Indexes { private final long size; private final List<Integer> iterateDimensions; private MultiDimensionIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, List<Integer> iterateDimensions, long[] initialIndexes, long size) { super(sourceSizes, iterateSizes, initialIndexes); this.iterateDimensions = iterateDimensions; this.size = size; indexes[iterateDimensions.get(0)]--; } /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */ @Override public long size() { return size; } /** * Advances this to the next cell in the standard indexed tensor cell order. * The first call to this will put it at the first position. * * @throws RuntimeException if this is called when hasNext returns false */ @Override public void next() { int iterateDimensionsIndex = 0; while ( indexes[iterateDimensions.get(iterateDimensionsIndex)] + 1 == dimensionSizes().size(iterateDimensions.get(iterateDimensionsIndex))) { indexes[iterateDimensions.get(iterateDimensionsIndex)] = 0; iterateDimensionsIndex++; } indexes[iterateDimensions.get(iterateDimensionsIndex)]++; } @Override public boolean hasNext() { for (int iterateDimension : iterateDimensions) { if (indexes[iterateDimension] + 1 < dimensionSizes().size(iterateDimension)) return true; } return false; } @Override int nextDimensionsAtStart() { int dimension = 0; while (dimension < iterateDimensions.size() && indexes[iterateDimensions.get(dimension)] == 0) dimension++; return dimension; } @Override int nextDimensionsAtEnd() { int dimension = 0; while (dimension < iterateDimensions.size() && indexes[iterateDimensions.get(dimension)] == dimensionSizes().size(iterateDimensions.get(dimension)) - 1) dimension++; return dimension; } } /** In this 
case we can reuse the source index computation for the iteration index */ private final static class EqualSizeMultiDimensionIndexes extends MultiDimensionIndexes { private long lastComputedSourceValueIndex = -1; private EqualSizeMultiDimensionIndexes(DimensionSizes sizes, List<Integer> iterateDimensions, long[] initialIndexes, long size) { super(sizes, sizes, iterateDimensions, initialIndexes, size); } @Override public long toSourceValueIndex() { return lastComputedSourceValueIndex = super.toSourceValueIndex(); } @Override long toIterationValueIndex() { return lastComputedSourceValueIndex; } } /** In this case we can keep track of indexes using a step instead of using the more elaborate computation */ private final static class SingleDimensionIndexes extends Indexes { private final long size; private final int iterateDimension; /** Maintain this directly as an optimization for 1-d iteration */ private long currentSourceValueIndex, currentIterationValueIndex; /** The iteration step in the value index space */ private final long sourceStep, iterationStep; private SingleDimensionIndexes(DimensionSizes sourceSizes, DimensionSizes iterateSizes, int iterateDimension, long[] initialIndexes, long size) { super(sourceSizes, iterateSizes, initialIndexes); this.iterateDimension = iterateDimension; this.size = size; this.sourceStep = productOfDimensionsAfter(iterateDimension, sourceSizes); this.iterationStep = productOfDimensionsAfter(iterateDimension, iterateSizes); indexes[iterateDimension]--; currentSourceValueIndex = IndexedTensor.toValueIndex(indexes, sourceSizes); currentIterationValueIndex = IndexedTensor.toValueIndex(indexes, iterateSizes); } /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */ @Override public long size() { return size; } /** * Advances this to the next cell in the standard indexed tensor cell order. * The first call to this will put it at the first position. 
* * @throws RuntimeException if this is called when hasNext returns false */ @Override public void next() { indexes[iterateDimension]++; currentSourceValueIndex += sourceStep; currentIterationValueIndex += iterationStep; } @Override public long toSourceValueIndex() { return currentSourceValueIndex; } @Override long toIterationValueIndex() { return currentIterationValueIndex; } @Override public boolean hasNext() { return indexes[iterateDimension] + 1 < size; } @Override int nextDimensionsAtStart() { return currentSourceValueIndex == 0 ? 1 : 0; } @Override int nextDimensionsAtEnd() { return currentSourceValueIndex == size - 1 ? 1 : 0; } } /** In this case we only need to keep track of one index */ private final static class EqualSizeSingleDimensionIndexes extends Indexes { private final long size; private final int iterateDimension; /** Maintain this directly as an optimization for 1-d iteration */ private long currentValueIndex; /** The iteration step in the value index space */ private final long step; private EqualSizeSingleDimensionIndexes(DimensionSizes sizes, int iterateDimension, long[] initialIndexes, long size) { super(sizes, sizes, initialIndexes); this.iterateDimension = iterateDimension; this.size = size; this.step = productOfDimensionsAfter(iterateDimension, sizes); indexes[iterateDimension]--; currentValueIndex = IndexedTensor.toValueIndex(indexes, sizes); } /** Returns the number of values this will iterate over - i.e the product if the iterating dimension sizes */ @Override public long size() { return size; } /** * Advances this to the next cell in the standard indexed tensor cell order. * The first call to this will put it at the first position. 
* * @throws RuntimeException if this is called when hasNext returns false */ @Override public void next() { indexes[iterateDimension]++; currentValueIndex += step; } @Override public boolean hasNext() { return indexes[iterateDimension] + 1 < size; } @Override public long toSourceValueIndex() { return currentValueIndex; } @Override long toIterationValueIndex() { return currentValueIndex; } @Override int nextDimensionsAtStart() { return currentValueIndex == 0 ? 1 : 0; } @Override int nextDimensionsAtEnd() { return currentValueIndex == size - 1 ? 1 : 0; } } }
Can be simplified to `return copyOf(List.of(nodes))`.
/**
 * Returns a NodeList containing the given nodes, in the given order.
 *
 * @param nodes the nodes to include; must not contain nulls (List.of is null-hostile)
 * @return a NodeList over a copy of the given nodes
 */
public static NodeList of(Node ... nodes) {
    // List.of copies the varargs array into an immutable list, so the manual
    // ArrayList + Collections.addAll copy is unnecessary.
    // NOTE(review): unlike the previous ArrayList-based copy, List.of throws
    // NullPointerException on null elements — callers must not pass nulls.
    return copyOf(List.of(nodes));
}
return copyOf(nodeList);
/** Returns a NodeList containing exactly the given nodes, in the given order */
public static NodeList of(Node ... nodes) {
    List<Node> snapshot = List.of(nodes); // immutable copy of the varargs array; rejects null elements
    return copyOf(snapshot);
}
/**
 * A filterable list of {@link Node}s: each filtering method returns a new NodeList
 * containing the matching subset, leaving this list unchanged.
 */
class NodeList extends AbstractFilteringList<Node, NodeList> {

    protected NodeList(List<Node> nodes, boolean negate) {
        super(nodes, negate, NodeList::new);
    }

    /** Returns the node with the given hostname from this list, or empty if it is not present */
    public Optional<Node> node(String hostname) {
        return matching(node -> node.hostname().equals(hostname)).first();
    }

    /** Returns the subset of nodes which are retired */
    public NodeList retired() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().retired());
    }

    /** Returns the subset of nodes that are being deprovisioned */
    public NodeList deprovisioning() {
        return matching(node -> node.status().wantToRetire() && node.status().wantToDeprovision());
    }

    /** Returns the subset of nodes which are removable */
    public NodeList removable() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().isRemovable());
    }

    /** Returns the subset of nodes having exactly the given resources */
    public NodeList resources(NodeResources resources) { return matching(node -> node.resources().equals(resources)); }

    /** Returns the subset of nodes which satisfy the given resources */
    public NodeList satisfies(NodeResources resources) { return matching(node -> node.resources().satisfies(resources)); }

    /** Returns the subset of nodes not in the given set */
    public NodeList except(Set<Node> nodes) { return matching(node -> ! nodes.contains(node)); }

    /** Returns the subset of nodes assigned to the given cluster type */
    public NodeList type(ClusterSpec.Type type) {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().equals(type));
    }

    /** Returns the subset of nodes that run containers */
    public NodeList container() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().isContainer());
    }

    /** Returns the subset of nodes that run a stateless service */
    public NodeList stateless() {
        return matching(node -> node.allocation().isPresent() && ! node.allocation().get().membership().cluster().isStateful());
    }

    /** Returns the subset of nodes that run a stateful service */
    public NodeList stateful() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful());
    }

    /** Returns the subset of nodes that are currently changing their Vespa version */
    public NodeList changingVersion() {
        // A node is "changing version" when its reported version differs from the version its cluster wants
        return matching(node -> node.status().vespaVersion().isPresent() &&
                                node.allocation().isPresent() &&
                                !node.status().vespaVersion().get().equals(node.allocation().get().membership().cluster().vespaVersion()));
    }

    /** Returns the subset of nodes with want to fail set to true */
    public NodeList failing() { return matching(node -> node.status().wantToFail()); }

    /** Returns the subset of nodes that are currently changing their OS version to given version */
    public NodeList changingOsVersionTo(Version version) { return matching(node -> node.status().osVersion().changingTo(version)); }

    /** Returns the subset of nodes that are currently changing their OS version */
    public NodeList changingOsVersion() { return matching(node -> node.status().osVersion().changing()); }

    /** Returns a copy of this sorted by current OS version (lowest to highest) */
    public NodeList byIncreasingOsVersion() {
        // Nodes without a current OS version sort first (emptyVersion is the lowest)
        return sortedBy(Comparator.comparing(node -> node.status()
                                                         .osVersion()
                                                         .current()
                                                         .orElse(Version.emptyVersion)));
    }

    /** Returns the subset of nodes that are currently on a lower version than the given version */
    public NodeList osVersionIsBefore(Version version) { return matching(node -> node.status().osVersion().isBefore(version)); }

    /** Returns the subset of nodes that are currently on the given OS version */
    public NodeList onOsVersion(Version version) { return matching(node -> node.status().osVersion().matches(version)); }

    /** Returns the subset of nodes assigned to the given cluster */
    public NodeList cluster(ClusterSpec.Id cluster) {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().id().equals(cluster));
    }

    /** Returns the subset of nodes owned by the given application */
    public NodeList owner(ApplicationId application) {
        return matching(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false));
    }

    /** Returns the subset of nodes allocated to a tester instance */
    public NodeList tester() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().owner().instance().isTester());
    }

    /** Returns the subset of nodes matching the given node type(s) */
    public NodeList nodeType(NodeType first, NodeType... rest) {
        if (rest.length == 0) {
            return matching(node -> node.type() == first);
        }
        EnumSet<NodeType> nodeTypes = EnumSet.of(first, rest);
        return matching(node -> nodeTypes.contains(node.type()));
    }

    /** Returns the subset of nodes of the host type */
    public NodeList hosts() { return nodeType(NodeType.host); }

    /** Returns the subset of nodes that are parents */
    // NOTE(review): "parents" here is implemented as "nodes without a parent hostname"
    // (i.e. top-level nodes); the name reads ambiguously — confirm intent with callers.
    public NodeList parents() { return matching(node -> node.parentHostname().isEmpty()); }

    /** Returns the child nodes of the given parent node */
    public NodeList childrenOf(String hostname) { return matching(node -> node.hasParent(hostname)); }

    public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); }

    /** Returns the subset of nodes that are in any of the given state(s) */
    public NodeList state(Node.State first, Node.State... rest) {
        if (rest.length == 0) {
            return matching(node -> node.state() == first);
        }
        return state(EnumSet.of(first, rest));
    }

    /** Returns the subset of nodes that are in any of the given state(s) */
    public NodeList state(Set<Node.State> nodeStates) { return matching(node -> nodeStates.contains(node.state())); }

    /** Returns the subset of nodes which have a record of being down */
    public NodeList down() { return matching(Node::isDown); }

    /** Returns the subset of nodes which have retirement requested */
    public NodeList retirementRequested() {
        return matching(node -> node.status().wantToRetire() || node.status().preferToRetire());
    }

    /** Returns the parent nodes of the given child nodes */
    public NodeList parentsOf(NodeList children) {
        // NOTE(review): the isPresent filter is redundant given the Optional::stream flatMap that follows
        return children.stream()
                       .map(this::parentOf)
                       .filter(Optional::isPresent)
                       .flatMap(Optional::stream)
                       .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf));
    }

    /** Returns the nodes contained in the group identified by given index */
    public NodeList group(int index) {
        return matching(n -> n.allocation().isPresent() &&
                             n.allocation().get().membership().cluster().group().equals(Optional.of(ClusterSpec.Group.from(index))));
    }

    /** Returns the parent node of the given child node */
    public Optional<Node> parentOf(Node child) {
        return child.parentHostname()
                    .flatMap(parentHostname -> stream().filter(node -> node.hostname().equals(parentHostname))
                                                       .findFirst());
    }

    /**
     * Returns the cluster spec of the nodes in this, without any group designation
     *
     * @throws IllegalStateException if there are no nodes in this list or they do not all belong
     *                               to the same cluster
     */
    public ClusterSpec clusterSpec() {
        ensureSingleCluster();
        if (isEmpty()) throw new IllegalStateException("No nodes");
        return first().get().allocation().get().membership().cluster().with(Optional.empty());
    }

    /**
     * Returns the resources of the nodes of this.
     *
     * NOTE: If the nodes do not all have the same values of node resources, a random pick among those node resources
     *       will be returned.
     *
     * @throws IllegalStateException if the nodes in this do not all belong to the same cluster
     */
    public ClusterResources toResources() {
        ensureSingleCluster();
        if (isEmpty()) return new ClusterResources(0, 0, NodeResources.unspecified());
        // Group count is the number of distinct group ids among the allocated nodes
        return new ClusterResources(size(),
                                    (int)stream().map(node -> node.allocation().get().membership().cluster().group().get())
                                                 .distinct()
                                                 .count(),
                                    first().get().resources());
    }

    /** Returns the nodes that are allocated on an exclusive network switch within its cluster */
    public NodeList onExclusiveSwitch(NodeList clusterHosts) {
        ensureSingleCluster();
        // Count how many of the cluster's hosts sit on each switch
        Map<String, Long> switchCount = clusterHosts.stream()
                                                    .flatMap(host -> host.switchHostname().stream())
                                                    .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
        return matching(node -> {
            Optional<Node> nodeOnSwitch = clusterHosts.parentOf(node);
            if (node.parentHostname().isPresent()) {
                if (nodeOnSwitch.isEmpty()) {
                    throw new IllegalArgumentException("Parent of " + node + ", " + node.parentHostname().get() +
                                                       ", not found in given cluster hosts");
                }
            } else {
                // A node without a parent is itself the host on the switch
                nodeOnSwitch = Optional.of(node);
            }
            Optional<String> allocatedSwitch = nodeOnSwitch.flatMap(Node::switchHostname);
            // Nodes with no switch information are treated as exclusive
            return allocatedSwitch.isEmpty() || switchCount.get(allocatedSwitch.get()) == 1;
        });
    }

    /** Verifies that all nodes in this are allocated, and to the same cluster (ignoring group) */
    private void ensureSingleCluster() {
        if (isEmpty()) return;
        if (stream().anyMatch(node -> node.allocation().isEmpty()))
            throw new IllegalStateException("Some nodes are not allocated to a cluster");
        ClusterSpec firstNodeSpec = first().get().allocation().get().membership().cluster().with(Optional.empty());
        if (stream().map(node -> node.allocation().get().membership().cluster().with(Optional.empty()))
                    .anyMatch(clusterSpec -> ! clusterSpec.equals(firstNodeSpec)))
            throw new IllegalStateException("Nodes belong to multiple clusters");
    }

    /** Returns the nodes of this as a stream */
    public Stream<Node> stream() { return asList().stream(); }

    /** Returns a NodeList over the given nodes, without copying them */
    public static NodeList copyOf(List<Node> nodes) {
        return new NodeList(nodes, false);
    }

    @Override
    public String toString() { return asList().toString(); }

    @Override
    public int hashCode() { return asList().hashCode(); }

    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if ( ! (other instanceof NodeList)) return false;
        return this.asList().equals(((NodeList) other).asList());
    }

}
/**
 * A filterable list of {@link Node}s: each filtering method returns a new NodeList
 * containing the matching subset, leaving this list unchanged.
 */
class NodeList extends AbstractFilteringList<Node, NodeList> {

    protected NodeList(List<Node> nodes, boolean negate) {
        super(nodes, negate, NodeList::new);
    }

    /** Returns the node with the given hostname from this list, or empty if it is not present */
    public Optional<Node> node(String hostname) {
        return matching(node -> node.hostname().equals(hostname)).first();
    }

    /** Returns the subset of nodes which are retired */
    public NodeList retired() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().retired());
    }

    /** Returns the subset of nodes that are being deprovisioned */
    public NodeList deprovisioning() {
        return matching(node -> node.status().wantToRetire() && node.status().wantToDeprovision());
    }

    /** Returns the subset of nodes which are removable */
    public NodeList removable() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().isRemovable());
    }

    /** Returns the subset of nodes having exactly the given resources */
    public NodeList resources(NodeResources resources) { return matching(node -> node.resources().equals(resources)); }

    /** Returns the subset of nodes which satisfy the given resources */
    public NodeList satisfies(NodeResources resources) { return matching(node -> node.resources().satisfies(resources)); }

    /** Returns the subset of nodes not in the given set */
    public NodeList except(Set<Node> nodes) { return matching(node -> ! nodes.contains(node)); }

    /** Returns the subset of nodes assigned to the given cluster type */
    public NodeList type(ClusterSpec.Type type) {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().equals(type));
    }

    /** Returns the subset of nodes that run containers */
    public NodeList container() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().isContainer());
    }

    /** Returns the subset of nodes that run a stateless service */
    public NodeList stateless() {
        return matching(node -> node.allocation().isPresent() && ! node.allocation().get().membership().cluster().isStateful());
    }

    /** Returns the subset of nodes that run a stateful service */
    public NodeList stateful() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful());
    }

    /** Returns the subset of nodes that are currently changing their Vespa version */
    public NodeList changingVersion() {
        // A node is "changing version" when its reported version differs from the version its cluster wants
        return matching(node -> node.status().vespaVersion().isPresent() &&
                                node.allocation().isPresent() &&
                                !node.status().vespaVersion().get().equals(node.allocation().get().membership().cluster().vespaVersion()));
    }

    /** Returns the subset of nodes with want to fail set to true */
    public NodeList failing() { return matching(node -> node.status().wantToFail()); }

    /** Returns the subset of nodes that are currently changing their OS version to given version */
    public NodeList changingOsVersionTo(Version version) { return matching(node -> node.status().osVersion().changingTo(version)); }

    /** Returns the subset of nodes that are currently changing their OS version */
    public NodeList changingOsVersion() { return matching(node -> node.status().osVersion().changing()); }

    /** Returns a copy of this sorted by current OS version (lowest to highest) */
    public NodeList byIncreasingOsVersion() {
        // Nodes without a current OS version sort first (emptyVersion is the lowest)
        return sortedBy(Comparator.comparing(node -> node.status()
                                                         .osVersion()
                                                         .current()
                                                         .orElse(Version.emptyVersion)));
    }

    /** Returns the subset of nodes that are currently on a lower version than the given version */
    public NodeList osVersionIsBefore(Version version) { return matching(node -> node.status().osVersion().isBefore(version)); }

    /** Returns the subset of nodes that are currently on the given OS version */
    public NodeList onOsVersion(Version version) { return matching(node -> node.status().osVersion().matches(version)); }

    /** Returns the subset of nodes assigned to the given cluster */
    public NodeList cluster(ClusterSpec.Id cluster) {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().id().equals(cluster));
    }

    /** Returns the subset of nodes owned by the given application */
    public NodeList owner(ApplicationId application) {
        return matching(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false));
    }

    /** Returns the subset of nodes allocated to a tester instance */
    public NodeList tester() {
        return matching(node -> node.allocation().isPresent() && node.allocation().get().owner().instance().isTester());
    }

    /** Returns the subset of nodes matching the given node type(s) */
    public NodeList nodeType(NodeType first, NodeType... rest) {
        if (rest.length == 0) {
            return matching(node -> node.type() == first);
        }
        EnumSet<NodeType> nodeTypes = EnumSet.of(first, rest);
        return matching(node -> nodeTypes.contains(node.type()));
    }

    /** Returns the subset of nodes of the host type */
    public NodeList hosts() { return nodeType(NodeType.host); }

    /** Returns the subset of nodes that are parents */
    // NOTE(review): "parents" here is implemented as "nodes without a parent hostname"
    // (i.e. top-level nodes); the name reads ambiguously — confirm intent with callers.
    public NodeList parents() { return matching(node -> node.parentHostname().isEmpty()); }

    /** Returns the child nodes of the given parent node */
    public NodeList childrenOf(String hostname) { return matching(node -> node.hasParent(hostname)); }

    public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); }

    /** Returns the subset of nodes that are in any of the given state(s) */
    public NodeList state(Node.State first, Node.State... rest) {
        if (rest.length == 0) {
            return matching(node -> node.state() == first);
        }
        return state(EnumSet.of(first, rest));
    }

    /** Returns the subset of nodes that are in any of the given state(s) */
    public NodeList state(Set<Node.State> nodeStates) { return matching(node -> nodeStates.contains(node.state())); }

    /** Returns the subset of nodes which have a record of being down */
    public NodeList down() { return matching(Node::isDown); }

    /** Returns the subset of nodes which have retirement requested */
    public NodeList retirementRequested() {
        return matching(node -> node.status().wantToRetire() || node.status().preferToRetire());
    }

    /** Returns the parent nodes of the given child nodes */
    public NodeList parentsOf(NodeList children) {
        // NOTE(review): the isPresent filter is redundant given the Optional::stream flatMap that follows
        return children.stream()
                       .map(this::parentOf)
                       .filter(Optional::isPresent)
                       .flatMap(Optional::stream)
                       .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf));
    }

    /** Returns the nodes contained in the group identified by given index */
    public NodeList group(int index) {
        return matching(n -> n.allocation().isPresent() &&
                             n.allocation().get().membership().cluster().group().equals(Optional.of(ClusterSpec.Group.from(index))));
    }

    /** Returns the parent node of the given child node */
    public Optional<Node> parentOf(Node child) {
        return child.parentHostname()
                    .flatMap(parentHostname -> stream().filter(node -> node.hostname().equals(parentHostname))
                                                       .findFirst());
    }

    /**
     * Returns the cluster spec of the nodes in this, without any group designation
     *
     * @throws IllegalStateException if there are no nodes in this list or they do not all belong
     *                               to the same cluster
     */
    public ClusterSpec clusterSpec() {
        ensureSingleCluster();
        if (isEmpty()) throw new IllegalStateException("No nodes");
        return first().get().allocation().get().membership().cluster().with(Optional.empty());
    }

    /**
     * Returns the resources of the nodes of this.
     *
     * NOTE: If the nodes do not all have the same values of node resources, a random pick among those node resources
     *       will be returned.
     *
     * @throws IllegalStateException if the nodes in this do not all belong to the same cluster
     */
    public ClusterResources toResources() {
        ensureSingleCluster();
        if (isEmpty()) return new ClusterResources(0, 0, NodeResources.unspecified());
        // Group count is the number of distinct group ids among the allocated nodes
        return new ClusterResources(size(),
                                    (int)stream().map(node -> node.allocation().get().membership().cluster().group().get())
                                                 .distinct()
                                                 .count(),
                                    first().get().resources());
    }

    /** Returns the nodes that are allocated on an exclusive network switch within its cluster */
    public NodeList onExclusiveSwitch(NodeList clusterHosts) {
        ensureSingleCluster();
        // Count how many of the cluster's hosts sit on each switch
        Map<String, Long> switchCount = clusterHosts.stream()
                                                    .flatMap(host -> host.switchHostname().stream())
                                                    .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
        return matching(node -> {
            Optional<Node> nodeOnSwitch = clusterHosts.parentOf(node);
            if (node.parentHostname().isPresent()) {
                if (nodeOnSwitch.isEmpty()) {
                    throw new IllegalArgumentException("Parent of " + node + ", " + node.parentHostname().get() +
                                                       ", not found in given cluster hosts");
                }
            } else {
                // A node without a parent is itself the host on the switch
                nodeOnSwitch = Optional.of(node);
            }
            Optional<String> allocatedSwitch = nodeOnSwitch.flatMap(Node::switchHostname);
            // Nodes with no switch information are treated as exclusive
            return allocatedSwitch.isEmpty() || switchCount.get(allocatedSwitch.get()) == 1;
        });
    }

    /** Verifies that all nodes in this are allocated, and to the same cluster (ignoring group) */
    private void ensureSingleCluster() {
        if (isEmpty()) return;
        if (stream().anyMatch(node -> node.allocation().isEmpty()))
            throw new IllegalStateException("Some nodes are not allocated to a cluster");
        ClusterSpec firstNodeSpec = first().get().allocation().get().membership().cluster().with(Optional.empty());
        if (stream().map(node -> node.allocation().get().membership().cluster().with(Optional.empty()))
                    .anyMatch(clusterSpec -> ! clusterSpec.equals(firstNodeSpec)))
            throw new IllegalStateException("Nodes belong to multiple clusters");
    }

    /** Returns the nodes of this as a stream */
    public Stream<Node> stream() { return asList().stream(); }

    /** Returns a NodeList over the given nodes, without copying them */
    public static NodeList copyOf(List<Node> nodes) {
        return new NodeList(nodes, false);
    }

    @Override
    public String toString() { return asList().toString(); }

    @Override
    public int hashCode() { return asList().hashCode(); }

    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if ( ! (other instanceof NodeList)) return false;
        return this.asList().equals(((NodeList) other).asList());
    }

}
Thanks, fixed.
public static Value largestOf(Value value1, Value value2) { if (value1 == DOUBLE || value2 == DOUBLE) return DOUBLE; if (value1 == FLOAT || value2 == FLOAT) return FLOAT; if (value1 == BFLOAT16 || value2 == BFLOAT16) return FLOAT; if (value1 == INT8 || value2 == INT8) return FLOAT; return FLOAT; }
if (value1 == FLOAT || value2 == FLOAT) return FLOAT;
public static Value largestOf(Value value1, Value value2) { if (value1 == DOUBLE || value2 == DOUBLE) return DOUBLE; if (value1 == FLOAT || value2 == FLOAT) return FLOAT; if (value1 == BFLOAT16 || value2 == BFLOAT16) return BFLOAT16; return INT8; }
class TensorType { static Ascii7BitMatcher labelMatcher = new Ascii7BitMatcher("-_@" + charsAndNumbers(), "_@$" + charsAndNumbers()); /** The permissible cell value types. Default is double. */ public enum Value { DOUBLE("double"), FLOAT("float"), INT8("int8"), BFLOAT16("bfloat16"); private final String id; Value(String id) { this.id = id; } public String id() { return id; } public boolean isEqualOrLargerThan(TensorType.Value other) { return this == other || largestOf(this, other) == this; } public static Value largestOf(List<Value> values) { if (values.isEmpty()) return Value.DOUBLE; Value largest = null; for (Value value : values) { if (largest == null) largest = value; else largest = largestOf(largest, value); } return largest; } @Override public String toString() { return name().toLowerCase(); } public static Value fromId(String valueTypeString) { switch (valueTypeString) { case "double" : return Value.DOUBLE; case "float" : return Value.FLOAT; case "bfloat16" : return Value.BFLOAT16; case "int8" : return Value.INT8; default : throw new IllegalArgumentException("Value type must be either 'double', 'float', " + "'bfloat16', or 'int8' but was '" + valueTypeString + "'"); } } }; /** The empty tensor type - which is the same as a double */ public static final TensorType empty = new TensorType(Value.DOUBLE, Collections.emptyList()); private final Value valueType; /** Sorted list of the dimensions of this */ private final ImmutableList<Dimension> dimensions; private final TensorType mappedSubtype; public TensorType(Value valueType, Collection<Dimension> dimensions) { this.valueType = valueType; List<Dimension> dimensionList = new ArrayList<>(dimensions); Collections.sort(dimensionList); this.dimensions = ImmutableList.copyOf(dimensionList); if (dimensionList.stream().allMatch(d -> d.isIndexed())) mappedSubtype = empty; else if (dimensionList.stream().noneMatch(d -> d.isIndexed())) mappedSubtype = this; else mappedSubtype = new TensorType(valueType, 
dimensions.stream().filter(d -> ! d.isIndexed()).collect(Collectors.toList())); } static public Value combinedValueType(TensorType ... types) { List<Value> valueTypes = new ArrayList<>(); for (TensorType type : types) { if (type.rank() > 0) { valueTypes.add(type.valueType()); } } return Value.largestOf(valueTypes); } /** * Returns a tensor type instance from a * <a href="https: * <code>tensor(dimension1, dimension2, ...)</code> * where each dimension is either * <ul> * <li><code>dimension-name[]</code> - an unbound indexed dimension * <li><code>dimension-name[int]</code> - an bound indexed dimension * <li><code>dimension-name{}</code> - a mapped dimension * </ul> * Example: <code>tensor(x[10],y[20])</code> (a matrix) */ public static TensorType fromSpec(String specString) { return TensorTypeParser.fromSpec(specString); } /** Returns the numeric type of the cell values of this */ public Value valueType() { return valueType; } /** The type representing the mapped subset of dimensions of this. 
*/ public TensorType mappedSubtype() { return mappedSubtype; } /** Returns the number of dimensions of this: dimensions().size() */ public int rank() { return dimensions.size(); } /** Returns an immutable list of the dimensions of this */ public List<Dimension> dimensions() { return dimensions; } /** Returns an immutable set of the names of the dimensions of this */ public Set<String> dimensionNames() { return dimensions.stream().map(Dimension::name).collect(Collectors.toSet()); } /** Returns the dimension with this name, or empty if not present */ public Optional<Dimension> dimension(String name) { return indexOfDimension(name).map(i -> dimensions.get(i)); } /** Returns the 0-base index of this dimension, or empty if it is not present */ public Optional<Integer> indexOfDimension(String dimension) { for (int i = 0; i < dimensions.size(); i++) if (dimensions.get(i).name().equals(dimension)) return Optional.of(i); return Optional.empty(); } /* Returns the bound of this dimension if it is present and bound in this, empty otherwise */ public Optional<Long> sizeOfDimension(String dimension) { Optional<Dimension> d = dimension(dimension); if ( ! d.isPresent()) return Optional.empty(); return d.get().size(); } /** * Returns whether this type can be assigned to the given type, * i.e if the given type is a generalization of this type. */ public boolean isAssignableTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, false, true); } /** * Returns whether this type can be converted to the given type. * This is true if this type isAssignableTo the given type or * if it is not assignable only because it has a shorter dimension length * than the given type in some shared dimension(s), as it can then be * converted to the given type by zero padding. 
*/ public boolean isConvertibleTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, true, true); } /** * Returns whether or not this type can simply be renamed to * the given type. This is the same as being assignable, but disregarding * dimension names. */ public boolean isRenamableTo(TensorType other) { return isConvertibleOrAssignableTo(other, false, false); } private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible, boolean considerName) { if ( ! generalization.valueType().isEqualOrLargerThan(this.valueType) ) return false; if (generalization.dimensions().size() != this.dimensions().size()) return false; for (int i = 0; i < generalization.dimensions().size(); i++) { Dimension thisDimension = this.dimensions().get(i); Dimension generalizationDimension = generalization.dimensions().get(i); if (thisDimension.isIndexed() != generalizationDimension.isIndexed()) return false; if (considerName && ! thisDimension.name().equals(generalizationDimension.name())) return false; if (generalizationDimension.size().isPresent()) { if ( ! thisDimension.size().isPresent()) return false; if (convertible) { if (thisDimension.size().get() > generalizationDimension.size().get()) return false; } else { if (!thisDimension.size().get().equals(generalizationDimension.size().get())) return false; } } } return true; } @Override public String toString() { return "tensor" + (valueType == Value.DOUBLE ? "" : "<" + valueType.id() + ">") + "(" + dimensions.stream().map(Dimension::toString).collect(Collectors.joining(",")) + ")"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TensorType other = (TensorType)o; if ( (this.rank() == 0) && (other.rank() == 0)) return true; if ( this.valueType != other.valueType) return false; if ( ! 
this.dimensions.equals(other.dimensions)) return false; return true; } /** Returns whether the given type has the same dimension names as this */ public boolean mathematicallyEquals(TensorType other) { if (dimensions().size() != other.dimensions().size()) return false; for (int i = 0; i < dimensions().size(); i++) if (!dimensions().get(i).name().equals(other.dimensions().get(i).name())) return false; return true; } /** * Returns the dimensionwise generalization of this and the given type, or empty if no generalization exists. * A dimensionwise generalization exists if the two tensors share the same dimensions, and each dimension * is compatible. * For example, the dimensionwise generalization of tensor(x[],y[5]) and tensor(x[5],y[]) is tensor(x[],y[]) */ public Optional<TensorType> dimensionwiseGeneralizationWith(TensorType other) { if (this.equals(other)) return Optional.of(this); if (this.dimensions.size() != other.dimensions.size()) return Optional.empty(); Builder b = new Builder(TensorType.Value.largestOf(valueType, other.valueType)); for (int i = 0; i < dimensions.size(); i++) { Dimension thisDim = this.dimensions().get(i); Dimension otherDim = other.dimensions().get(i); if ( ! thisDim.name().equals(otherDim.name())) return Optional.empty(); if (thisDim.isIndexed() && otherDim.isIndexed()) { if (thisDim.size().isPresent() && otherDim.size().isPresent()) { if ( ! thisDim.size().get().equals(otherDim.size().get())) return Optional.empty(); b.dimension(thisDim); } else if (thisDim.size().isPresent()) { b.dimension(otherDim); } else if (otherDim.size().isPresent()) { b.dimension(thisDim); } else { b.dimension(thisDim); } } else if ( ! thisDim.isIndexed() && ! otherDim.isIndexed()) { b.dimension(thisDim); } else { return Optional.empty(); } } return Optional.of(b.build()); } @Override public int hashCode() { return Objects.hash(dimensions, valueType); } /** * A tensor dimension. * Dimensions have the natural order of their names. 
*/ public static abstract class Dimension implements Comparable<Dimension> { public enum Type { indexedBound, indexedUnbound, mapped } private final String name; private Dimension(String name) { this.name = requireIdentifier(name); } public final String name() { return name; } /** Returns the size of this dimension if it is bound, empty otherwise */ public abstract Optional<Long> size(); public abstract Type type(); /** Returns a copy of this with the name set to the given name */ public abstract Dimension withName(String name); /** Returns true if this is an indexed bound or unbound type */ public boolean isIndexed() { return type() == Type.indexedBound || type() == Type.indexedUnbound; } /** Returns true if this is of the mapped type */ public boolean isMapped() { return type() == Type.mapped; } /** * Returns the dimension resulting from combining two dimensions having the same name but possibly different * types: * * [N] + [M] = [ min(N, M) ] * [N] + [] = [] * [] + {} = {} */ Dimension combineWith(Optional<Dimension> other, boolean allowDifferentSizes) { if ( ! other.isPresent()) return this; if (this instanceof MappedDimension) return this; if (other.get() instanceof MappedDimension) return other.get(); if (this instanceof IndexedUnboundDimension) return this; if (other.get() instanceof IndexedUnboundDimension) return other.get(); IndexedBoundDimension thisIb = (IndexedBoundDimension)this; IndexedBoundDimension otherIb = (IndexedBoundDimension)other.get(); if (allowDifferentSizes) return thisIb.size().get() < otherIb.size().get() ? thisIb : otherIb; if ( ! 
thisIb.size().equals(otherIb.size())) throw new IllegalArgumentException("Unequal dimension sizes in " + thisIb + " and " + otherIb); return thisIb; } @Override public abstract String toString(); @Override public boolean equals(Object other) { if (this == other) return true; if (other == null || getClass() != other.getClass()) return false; return name.equals(((Dimension)other).name); } @Override public int hashCode() { return name.hashCode(); } @Override public int compareTo(Dimension other) { return this.name.compareTo(other.name); } public static Dimension indexed(String name, long size) { return new IndexedBoundDimension(name, size); } public static Dimension indexed(String name) { return new IndexedUnboundDimension(name); } public static Dimension mapped(String name) { return new MappedDimension(name); } static private String requireIdentifier(String name) { if (name == null) throw new IllegalArgumentException("A dimension name cannot be null"); if ( ! TensorType.labelMatcher.matches(name)) throw new IllegalArgumentException("A dimension name must be an identifier or integer, not '" + name + "'"); return name; } } public static class IndexedBoundDimension extends TensorType.Dimension { private final Long size; private IndexedBoundDimension(String name, long size) { super(name); if (size < 1) throw new IllegalArgumentException("Size of bound dimension '" + name + "' must be at least 1"); if (size > Integer.MAX_VALUE) throw new IllegalArgumentException("Size of bound dimension '" + name + "' cannot be larger than " + Integer.MAX_VALUE); this.size = size; } @Override public Optional<Long> size() { return Optional.of(size); } @Override public Type type() { return Type.indexedBound; } @Override public IndexedBoundDimension withName(String name) { return new IndexedBoundDimension(name, size); } @Override public String toString() { return name() + "[" + size + "]"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() 
!= o.getClass()) return false; if (!super.equals(o)) return false; IndexedBoundDimension that = (IndexedBoundDimension) o; if (!size.equals(that.size)) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + size.hashCode(); return result; } } public static class IndexedUnboundDimension extends TensorType.Dimension { private IndexedUnboundDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.indexedUnbound; } @Override public IndexedUnboundDimension withName(String name) { return new IndexedUnboundDimension(name); } @Override public String toString() { return name() + "[]"; } } public static class MappedDimension extends TensorType.Dimension { private MappedDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.mapped; } @Override public MappedDimension withName(String name) { return new MappedDimension(name); } @Override public String toString() { return name() + "{}"; } } public static class Builder { private final Map<String, Dimension> dimensions = new LinkedHashMap<>(); private final Value valueType; /** Creates an empty builder with cells of type double */ public Builder() { this(Value.DOUBLE); } public Builder(Value valueType) { this.valueType = valueType; } /** * Creates a builder containing a combination of the dimensions of the given types * * If the same dimension is indexed with different size restrictions the smallest size will be used. * If it is size restricted in one argument but not the other it will not be size restricted. * If it is indexed in one and mapped in the other it will become mapped. * * The value type will be the largest of the value types of the input types */ public Builder(TensorType ... types) { this(true, types); } public Builder(boolean allowDifferentSizes, TensorType ... 
types) { this.valueType = TensorType.combinedValueType(types); for (TensorType type : types) addDimensionsOf(type, allowDifferentSizes); } /** Creates a builder from the given dimensions, having double as the value type */ public Builder(Iterable<Dimension> dimensions) { this(Value.DOUBLE, dimensions); } /** Creates a builder from the given value type and dimensions */ public Builder(Value valueType, Iterable<Dimension> dimensions) { this.valueType = valueType; for (TensorType.Dimension dimension : dimensions) { dimension(dimension); } } private void addDimensionsOf(TensorType type, boolean allowDifferentSizes) { for (Dimension dimension : type.dimensions) { set(dimension.combineWith(Optional.ofNullable(dimensions.get(dimension.name())), allowDifferentSizes)); } } /** Returns the current number of dimensions in this */ public int rank() { return dimensions.size(); } /** * Adds a new dimension to this * * @throws IllegalArgumentException if the dimension is already present */ private Builder add(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); if (dimensions.containsKey(dimension.name())) throw new IllegalArgumentException("Could not add dimension " + dimension + " as this dimension " + "is already present"); dimensions.put(dimension.name(), dimension); return this; } /** Adds or replaces a dimension in this */ public Builder set(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); dimensions.put(dimension.name(), dimension); return this; } /** * Adds a bound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name, long size) { return add(new IndexedBoundDimension(name, size)); } /** * Adds an unbound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name) { return add(new IndexedUnboundDimension(name)); } /** * Adds a mapped 
dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder mapped(String name) { return add(new MappedDimension(name)); } /** Adds the given dimension */ public Builder dimension(Dimension dimension) { return add(dimension); } /** Returns the given dimension, or empty if none is present */ public Optional<Dimension> getDimension(String dimension) { return Optional.ofNullable(dimensions.get(dimension)); } public Builder dimension(String name, Dimension.Type type) { switch (type) { case mapped : mapped(name); break; case indexedUnbound : indexed(name); break; default : throw new IllegalArgumentException("This can not create a dimension of type " + type); } return this; } public TensorType build() { return new TensorType(valueType, dimensions.values()); } } }
class TensorType { static Ascii7BitMatcher labelMatcher = new Ascii7BitMatcher("-_@" + charsAndNumbers(), "_@$" + charsAndNumbers()); /** The permissible cell value types. Default is double. */ public enum Value { DOUBLE("double"), FLOAT("float"), INT8("int8"), BFLOAT16("bfloat16"); private final String id; Value(String id) { this.id = id; } public String id() { return id; } public boolean isEqualOrLargerThan(TensorType.Value other) { return this == other || largestOf(this, other) == this; } public static Value largestOf(List<Value> values) { if (values.isEmpty()) return Value.DOUBLE; Value largest = null; for (Value value : values) { if (largest == null) largest = value; else largest = largestOf(largest, value); } return largest; } @Override public String toString() { return name().toLowerCase(); } public static Value fromId(String valueTypeString) { for(Value value : Value.values()) { if (value.id.equals(valueTypeString)) { return value; } } throw new IllegalArgumentException("Value type must be either 'double', 'float', " + "'bfloat16', or 'int8' but was '" + valueTypeString + "'"); } }; /** The empty tensor type - which is the same as a double */ public static final TensorType empty = new TensorType(Value.DOUBLE, Collections.emptyList()); private final Value valueType; /** Sorted list of the dimensions of this */ private final ImmutableList<Dimension> dimensions; private final TensorType mappedSubtype; public TensorType(Value valueType, Collection<Dimension> dimensions) { this.valueType = valueType; List<Dimension> dimensionList = new ArrayList<>(dimensions); Collections.sort(dimensionList); this.dimensions = ImmutableList.copyOf(dimensionList); if (dimensionList.stream().allMatch(d -> d.isIndexed())) mappedSubtype = empty; else if (dimensionList.stream().noneMatch(d -> d.isIndexed())) mappedSubtype = this; else mappedSubtype = new TensorType(valueType, dimensions.stream().filter(d -> ! 
d.isIndexed()).collect(Collectors.toList())); } static public Value combinedValueType(TensorType ... types) { List<Value> valueTypes = new ArrayList<>(); for (TensorType type : types) { if (type.rank() > 0) { valueTypes.add(type.valueType()); } } return Value.largestOf(valueTypes); } /** * Returns a tensor type instance from a * <a href="https: * <code>tensor(dimension1, dimension2, ...)</code> * where each dimension is either * <ul> * <li><code>dimension-name[]</code> - an unbound indexed dimension * <li><code>dimension-name[int]</code> - an bound indexed dimension * <li><code>dimension-name{}</code> - a mapped dimension * </ul> * Example: <code>tensor(x[10],y[20])</code> (a matrix) */ public static TensorType fromSpec(String specString) { return TensorTypeParser.fromSpec(specString); } /** Returns the numeric type of the cell values of this */ public Value valueType() { return valueType; } /** The type representing the mapped subset of dimensions of this. */ public TensorType mappedSubtype() { return mappedSubtype; } /** Returns the number of dimensions of this: dimensions().size() */ public int rank() { return dimensions.size(); } /** Returns an immutable list of the dimensions of this */ public List<Dimension> dimensions() { return dimensions; } /** Returns an immutable set of the names of the dimensions of this */ public Set<String> dimensionNames() { return dimensions.stream().map(Dimension::name).collect(Collectors.toSet()); } /** Returns the dimension with this name, or empty if not present */ public Optional<Dimension> dimension(String name) { return indexOfDimension(name).map(i -> dimensions.get(i)); } /** Returns the 0-base index of this dimension, or empty if it is not present */ public Optional<Integer> indexOfDimension(String dimension) { for (int i = 0; i < dimensions.size(); i++) if (dimensions.get(i).name().equals(dimension)) return Optional.of(i); return Optional.empty(); } /* Returns the bound of this dimension if it is present and bound in this, 
empty otherwise */ public Optional<Long> sizeOfDimension(String dimension) { Optional<Dimension> d = dimension(dimension); if ( ! d.isPresent()) return Optional.empty(); return d.get().size(); } /** * Returns whether this type can be assigned to the given type, * i.e if the given type is a generalization of this type. */ public boolean isAssignableTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, false, true); } /** * Returns whether this type can be converted to the given type. * This is true if this type isAssignableTo the given type or * if it is not assignable only because it has a shorter dimension length * than the given type in some shared dimension(s), as it can then be * converted to the given type by zero padding. */ public boolean isConvertibleTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, true, true); } /** * Returns whether or not this type can simply be renamed to * the given type. This is the same as being assignable, but disregarding * dimension names. */ public boolean isRenamableTo(TensorType other) { return isConvertibleOrAssignableTo(other, false, false); } private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible, boolean considerName) { if ( ! generalization.valueType().isEqualOrLargerThan(this.valueType) ) return false; if (generalization.dimensions().size() != this.dimensions().size()) return false; for (int i = 0; i < generalization.dimensions().size(); i++) { Dimension thisDimension = this.dimensions().get(i); Dimension generalizationDimension = generalization.dimensions().get(i); if (thisDimension.isIndexed() != generalizationDimension.isIndexed()) return false; if (considerName && ! thisDimension.name().equals(generalizationDimension.name())) return false; if (generalizationDimension.size().isPresent()) { if ( ! 
thisDimension.size().isPresent()) return false; if (convertible) { if (thisDimension.size().get() > generalizationDimension.size().get()) return false; } else { if (!thisDimension.size().get().equals(generalizationDimension.size().get())) return false; } } } return true; } @Override public String toString() { return "tensor" + (valueType == Value.DOUBLE ? "" : "<" + valueType.id() + ">") + "(" + dimensions.stream().map(Dimension::toString).collect(Collectors.joining(",")) + ")"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TensorType other = (TensorType)o; if ( (this.rank() == 0) && (other.rank() == 0)) return true; if ( this.valueType != other.valueType) return false; if ( ! this.dimensions.equals(other.dimensions)) return false; return true; } /** Returns whether the given type has the same dimension names as this */ public boolean mathematicallyEquals(TensorType other) { if (dimensions().size() != other.dimensions().size()) return false; for (int i = 0; i < dimensions().size(); i++) if (!dimensions().get(i).name().equals(other.dimensions().get(i).name())) return false; return true; } /** * Returns the dimensionwise generalization of this and the given type, or empty if no generalization exists. * A dimensionwise generalization exists if the two tensors share the same dimensions, and each dimension * is compatible. * For example, the dimensionwise generalization of tensor(x[],y[5]) and tensor(x[5],y[]) is tensor(x[],y[]) */ public Optional<TensorType> dimensionwiseGeneralizationWith(TensorType other) { if (this.equals(other)) return Optional.of(this); if (this.dimensions.size() != other.dimensions.size()) return Optional.empty(); Builder b = new Builder(TensorType.Value.largestOf(valueType, other.valueType)); for (int i = 0; i < dimensions.size(); i++) { Dimension thisDim = this.dimensions().get(i); Dimension otherDim = other.dimensions().get(i); if ( ! 
thisDim.name().equals(otherDim.name())) return Optional.empty(); if (thisDim.isIndexed() && otherDim.isIndexed()) { if (thisDim.size().isPresent() && otherDim.size().isPresent()) { if ( ! thisDim.size().get().equals(otherDim.size().get())) return Optional.empty(); b.dimension(thisDim); } else if (thisDim.size().isPresent()) { b.dimension(otherDim); } else if (otherDim.size().isPresent()) { b.dimension(thisDim); } else { b.dimension(thisDim); } } else if ( ! thisDim.isIndexed() && ! otherDim.isIndexed()) { b.dimension(thisDim); } else { return Optional.empty(); } } return Optional.of(b.build()); } @Override public int hashCode() { return Objects.hash(dimensions, valueType); } /** * A tensor dimension. * Dimensions have the natural order of their names. */ public static abstract class Dimension implements Comparable<Dimension> { public enum Type { indexedBound, indexedUnbound, mapped } private final String name; private Dimension(String name) { this.name = requireIdentifier(name); } public final String name() { return name; } /** Returns the size of this dimension if it is bound, empty otherwise */ public abstract Optional<Long> size(); public abstract Type type(); /** Returns a copy of this with the name set to the given name */ public abstract Dimension withName(String name); /** Returns true if this is an indexed bound or unbound type */ public boolean isIndexed() { return type() == Type.indexedBound || type() == Type.indexedUnbound; } /** Returns true if this is of the mapped type */ public boolean isMapped() { return type() == Type.mapped; } /** * Returns the dimension resulting from combining two dimensions having the same name but possibly different * types: * * [N] + [M] = [ min(N, M) ] * [N] + [] = [] * [] + {} = {} */ Dimension combineWith(Optional<Dimension> other, boolean allowDifferentSizes) { if ( ! 
other.isPresent()) return this; if (this instanceof MappedDimension) return this; if (other.get() instanceof MappedDimension) return other.get(); if (this instanceof IndexedUnboundDimension) return this; if (other.get() instanceof IndexedUnboundDimension) return other.get(); IndexedBoundDimension thisIb = (IndexedBoundDimension)this; IndexedBoundDimension otherIb = (IndexedBoundDimension)other.get(); if (allowDifferentSizes) return thisIb.size().get() < otherIb.size().get() ? thisIb : otherIb; if ( ! thisIb.size().equals(otherIb.size())) throw new IllegalArgumentException("Unequal dimension sizes in " + thisIb + " and " + otherIb); return thisIb; } @Override public abstract String toString(); @Override public boolean equals(Object other) { if (this == other) return true; if (other == null || getClass() != other.getClass()) return false; return name.equals(((Dimension)other).name); } @Override public int hashCode() { return name.hashCode(); } @Override public int compareTo(Dimension other) { return this.name.compareTo(other.name); } public static Dimension indexed(String name, long size) { return new IndexedBoundDimension(name, size); } public static Dimension indexed(String name) { return new IndexedUnboundDimension(name); } public static Dimension mapped(String name) { return new MappedDimension(name); } static private String requireIdentifier(String name) { if (name == null) throw new IllegalArgumentException("A dimension name cannot be null"); if ( ! 
TensorType.labelMatcher.matches(name)) throw new IllegalArgumentException("A dimension name must be an identifier or integer, not '" + name + "'"); return name; } } public static class IndexedBoundDimension extends TensorType.Dimension { private final Long size; private IndexedBoundDimension(String name, long size) { super(name); if (size < 1) throw new IllegalArgumentException("Size of bound dimension '" + name + "' must be at least 1"); if (size > Integer.MAX_VALUE) throw new IllegalArgumentException("Size of bound dimension '" + name + "' cannot be larger than " + Integer.MAX_VALUE); this.size = size; } @Override public Optional<Long> size() { return Optional.of(size); } @Override public Type type() { return Type.indexedBound; } @Override public IndexedBoundDimension withName(String name) { return new IndexedBoundDimension(name, size); } @Override public String toString() { return name() + "[" + size + "]"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; IndexedBoundDimension that = (IndexedBoundDimension) o; if (!size.equals(that.size)) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + size.hashCode(); return result; } } public static class IndexedUnboundDimension extends TensorType.Dimension { private IndexedUnboundDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.indexedUnbound; } @Override public IndexedUnboundDimension withName(String name) { return new IndexedUnboundDimension(name); } @Override public String toString() { return name() + "[]"; } } public static class MappedDimension extends TensorType.Dimension { private MappedDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return 
Type.mapped; } @Override public MappedDimension withName(String name) { return new MappedDimension(name); } @Override public String toString() { return name() + "{}"; } } public static class Builder { private final Map<String, Dimension> dimensions = new LinkedHashMap<>(); private final Value valueType; /** Creates an empty builder with cells of type double */ public Builder() { this(Value.DOUBLE); } public Builder(Value valueType) { this.valueType = valueType; } /** * Creates a builder containing a combination of the dimensions of the given types * * If the same dimension is indexed with different size restrictions the smallest size will be used. * If it is size restricted in one argument but not the other it will not be size restricted. * If it is indexed in one and mapped in the other it will become mapped. * * The value type will be the largest of the value types of the input types */ public Builder(TensorType ... types) { this(true, types); } public Builder(boolean allowDifferentSizes, TensorType ... 
types) { this.valueType = TensorType.combinedValueType(types); for (TensorType type : types) addDimensionsOf(type, allowDifferentSizes); } /** Creates a builder from the given dimensions, having double as the value type */ public Builder(Iterable<Dimension> dimensions) { this(Value.DOUBLE, dimensions); } /** Creates a builder from the given value type and dimensions */ public Builder(Value valueType, Iterable<Dimension> dimensions) { this.valueType = valueType; for (TensorType.Dimension dimension : dimensions) { dimension(dimension); } } private void addDimensionsOf(TensorType type, boolean allowDifferentSizes) { for (Dimension dimension : type.dimensions) { set(dimension.combineWith(Optional.ofNullable(dimensions.get(dimension.name())), allowDifferentSizes)); } } /** Returns the current number of dimensions in this */ public int rank() { return dimensions.size(); } /** * Adds a new dimension to this * * @throws IllegalArgumentException if the dimension is already present */ private Builder add(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); if (dimensions.containsKey(dimension.name())) throw new IllegalArgumentException("Could not add dimension " + dimension + " as this dimension " + "is already present"); dimensions.put(dimension.name(), dimension); return this; } /** Adds or replaces a dimension in this */ public Builder set(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); dimensions.put(dimension.name(), dimension); return this; } /** * Adds a bound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name, long size) { return add(new IndexedBoundDimension(name, size)); } /** * Adds an unbound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name) { return add(new IndexedUnboundDimension(name)); } /** * Adds a mapped 
dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder mapped(String name) { return add(new MappedDimension(name)); } /** Adds the given dimension */ public Builder dimension(Dimension dimension) { return add(dimension); } /** Returns the given dimension, or empty if none is present */ public Optional<Dimension> getDimension(String dimension) { return Optional.ofNullable(dimensions.get(dimension)); } public Builder dimension(String name, Dimension.Type type) { switch (type) { case mapped : mapped(name); break; case indexedUnbound : indexed(name); break; default : throw new IllegalArgumentException("This can not create a dimension of type " + type); } return this; } public TensorType build() { return new TensorType(valueType, dimensions.values()); } } }
Thanks, fixed.
public static Value fromId(String valueTypeString) { switch (valueTypeString) { case "double" : return Value.DOUBLE; case "float" : return Value.FLOAT; case "bfloat16" : return Value.BFLOAT16; case "int8" : return Value.INT8; default : throw new IllegalArgumentException("Value type must be either 'double', 'float', " + "'bfloat16', or 'int8' but was '" + valueTypeString + "'"); } }
case "bfloat16" : return Value.BFLOAT16;
public static Value fromId(String valueTypeString) { for(Value value : Value.values()) { if (value.id.equals(valueTypeString)) { return value; } } throw new IllegalArgumentException("Value type must be either 'double', 'float', " + "'bfloat16', or 'int8' but was '" + valueTypeString + "'"); }
class TensorType { static Ascii7BitMatcher labelMatcher = new Ascii7BitMatcher("-_@" + charsAndNumbers(), "_@$" + charsAndNumbers()); /** The permissible cell value types. Default is double. */ public enum Value { DOUBLE("double"), FLOAT("float"), INT8("int8"), BFLOAT16("bfloat16"); private final String id; Value(String id) { this.id = id; } public String id() { return id; } public boolean isEqualOrLargerThan(TensorType.Value other) { return this == other || largestOf(this, other) == this; } public static Value largestOf(List<Value> values) { if (values.isEmpty()) return Value.DOUBLE; Value largest = null; for (Value value : values) { if (largest == null) largest = value; else largest = largestOf(largest, value); } return largest; } public static Value largestOf(Value value1, Value value2) { if (value1 == DOUBLE || value2 == DOUBLE) return DOUBLE; if (value1 == FLOAT || value2 == FLOAT) return FLOAT; if (value1 == BFLOAT16 || value2 == BFLOAT16) return FLOAT; if (value1 == INT8 || value2 == INT8) return FLOAT; return FLOAT; } @Override public String toString() { return name().toLowerCase(); } }; /** The empty tensor type - which is the same as a double */ public static final TensorType empty = new TensorType(Value.DOUBLE, Collections.emptyList()); private final Value valueType; /** Sorted list of the dimensions of this */ private final ImmutableList<Dimension> dimensions; private final TensorType mappedSubtype; public TensorType(Value valueType, Collection<Dimension> dimensions) { this.valueType = valueType; List<Dimension> dimensionList = new ArrayList<>(dimensions); Collections.sort(dimensionList); this.dimensions = ImmutableList.copyOf(dimensionList); if (dimensionList.stream().allMatch(d -> d.isIndexed())) mappedSubtype = empty; else if (dimensionList.stream().noneMatch(d -> d.isIndexed())) mappedSubtype = this; else mappedSubtype = new TensorType(valueType, dimensions.stream().filter(d -> ! 
d.isIndexed()).collect(Collectors.toList())); } static public Value combinedValueType(TensorType ... types) { List<Value> valueTypes = new ArrayList<>(); for (TensorType type : types) { if (type.rank() > 0) { valueTypes.add(type.valueType()); } } return Value.largestOf(valueTypes); } /** * Returns a tensor type instance from a * <a href="https: * <code>tensor(dimension1, dimension2, ...)</code> * where each dimension is either * <ul> * <li><code>dimension-name[]</code> - an unbound indexed dimension * <li><code>dimension-name[int]</code> - an bound indexed dimension * <li><code>dimension-name{}</code> - a mapped dimension * </ul> * Example: <code>tensor(x[10],y[20])</code> (a matrix) */ public static TensorType fromSpec(String specString) { return TensorTypeParser.fromSpec(specString); } /** Returns the numeric type of the cell values of this */ public Value valueType() { return valueType; } /** The type representing the mapped subset of dimensions of this. */ public TensorType mappedSubtype() { return mappedSubtype; } /** Returns the number of dimensions of this: dimensions().size() */ public int rank() { return dimensions.size(); } /** Returns an immutable list of the dimensions of this */ public List<Dimension> dimensions() { return dimensions; } /** Returns an immutable set of the names of the dimensions of this */ public Set<String> dimensionNames() { return dimensions.stream().map(Dimension::name).collect(Collectors.toSet()); } /** Returns the dimension with this name, or empty if not present */ public Optional<Dimension> dimension(String name) { return indexOfDimension(name).map(i -> dimensions.get(i)); } /** Returns the 0-base index of this dimension, or empty if it is not present */ public Optional<Integer> indexOfDimension(String dimension) { for (int i = 0; i < dimensions.size(); i++) if (dimensions.get(i).name().equals(dimension)) return Optional.of(i); return Optional.empty(); } /* Returns the bound of this dimension if it is present and bound in this, 
empty otherwise */ public Optional<Long> sizeOfDimension(String dimension) { Optional<Dimension> d = dimension(dimension); if ( ! d.isPresent()) return Optional.empty(); return d.get().size(); } /** * Returns whether this type can be assigned to the given type, * i.e if the given type is a generalization of this type. */ public boolean isAssignableTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, false, true); } /** * Returns whether this type can be converted to the given type. * This is true if this type isAssignableTo the given type or * if it is not assignable only because it has a shorter dimension length * than the given type in some shared dimension(s), as it can then be * converted to the given type by zero padding. */ public boolean isConvertibleTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, true, true); } /** * Returns whether or not this type can simply be renamed to * the given type. This is the same as being assignable, but disregarding * dimension names. */ public boolean isRenamableTo(TensorType other) { return isConvertibleOrAssignableTo(other, false, false); } private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible, boolean considerName) { if ( ! generalization.valueType().isEqualOrLargerThan(this.valueType) ) return false; if (generalization.dimensions().size() != this.dimensions().size()) return false; for (int i = 0; i < generalization.dimensions().size(); i++) { Dimension thisDimension = this.dimensions().get(i); Dimension generalizationDimension = generalization.dimensions().get(i); if (thisDimension.isIndexed() != generalizationDimension.isIndexed()) return false; if (considerName && ! thisDimension.name().equals(generalizationDimension.name())) return false; if (generalizationDimension.size().isPresent()) { if ( ! 
thisDimension.size().isPresent()) return false; if (convertible) { if (thisDimension.size().get() > generalizationDimension.size().get()) return false; } else { if (!thisDimension.size().get().equals(generalizationDimension.size().get())) return false; } } } return true; } @Override public String toString() { return "tensor" + (valueType == Value.DOUBLE ? "" : "<" + valueType.id() + ">") + "(" + dimensions.stream().map(Dimension::toString).collect(Collectors.joining(",")) + ")"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TensorType other = (TensorType)o; if ( (this.rank() == 0) && (other.rank() == 0)) return true; if ( this.valueType != other.valueType) return false; if ( ! this.dimensions.equals(other.dimensions)) return false; return true; } /** Returns whether the given type has the same dimension names as this */ public boolean mathematicallyEquals(TensorType other) { if (dimensions().size() != other.dimensions().size()) return false; for (int i = 0; i < dimensions().size(); i++) if (!dimensions().get(i).name().equals(other.dimensions().get(i).name())) return false; return true; } /** * Returns the dimensionwise generalization of this and the given type, or empty if no generalization exists. * A dimensionwise generalization exists if the two tensors share the same dimensions, and each dimension * is compatible. * For example, the dimensionwise generalization of tensor(x[],y[5]) and tensor(x[5],y[]) is tensor(x[],y[]) */ public Optional<TensorType> dimensionwiseGeneralizationWith(TensorType other) { if (this.equals(other)) return Optional.of(this); if (this.dimensions.size() != other.dimensions.size()) return Optional.empty(); Builder b = new Builder(TensorType.Value.largestOf(valueType, other.valueType)); for (int i = 0; i < dimensions.size(); i++) { Dimension thisDim = this.dimensions().get(i); Dimension otherDim = other.dimensions().get(i); if ( ! 
thisDim.name().equals(otherDim.name())) return Optional.empty(); if (thisDim.isIndexed() && otherDim.isIndexed()) { if (thisDim.size().isPresent() && otherDim.size().isPresent()) { if ( ! thisDim.size().get().equals(otherDim.size().get())) return Optional.empty(); b.dimension(thisDim); } else if (thisDim.size().isPresent()) { b.dimension(otherDim); } else if (otherDim.size().isPresent()) { b.dimension(thisDim); } else { b.dimension(thisDim); } } else if ( ! thisDim.isIndexed() && ! otherDim.isIndexed()) { b.dimension(thisDim); } else { return Optional.empty(); } } return Optional.of(b.build()); } @Override public int hashCode() { return Objects.hash(dimensions, valueType); } /** * A tensor dimension. * Dimensions have the natural order of their names. */ public static abstract class Dimension implements Comparable<Dimension> { public enum Type { indexedBound, indexedUnbound, mapped } private final String name; private Dimension(String name) { this.name = requireIdentifier(name); } public final String name() { return name; } /** Returns the size of this dimension if it is bound, empty otherwise */ public abstract Optional<Long> size(); public abstract Type type(); /** Returns a copy of this with the name set to the given name */ public abstract Dimension withName(String name); /** Returns true if this is an indexed bound or unbound type */ public boolean isIndexed() { return type() == Type.indexedBound || type() == Type.indexedUnbound; } /** Returns true if this is of the mapped type */ public boolean isMapped() { return type() == Type.mapped; } /** * Returns the dimension resulting from combining two dimensions having the same name but possibly different * types: * * [N] + [M] = [ min(N, M) ] * [N] + [] = [] * [] + {} = {} */ Dimension combineWith(Optional<Dimension> other, boolean allowDifferentSizes) { if ( ! 
other.isPresent()) return this; if (this instanceof MappedDimension) return this; if (other.get() instanceof MappedDimension) return other.get(); if (this instanceof IndexedUnboundDimension) return this; if (other.get() instanceof IndexedUnboundDimension) return other.get(); IndexedBoundDimension thisIb = (IndexedBoundDimension)this; IndexedBoundDimension otherIb = (IndexedBoundDimension)other.get(); if (allowDifferentSizes) return thisIb.size().get() < otherIb.size().get() ? thisIb : otherIb; if ( ! thisIb.size().equals(otherIb.size())) throw new IllegalArgumentException("Unequal dimension sizes in " + thisIb + " and " + otherIb); return thisIb; } @Override public abstract String toString(); @Override public boolean equals(Object other) { if (this == other) return true; if (other == null || getClass() != other.getClass()) return false; return name.equals(((Dimension)other).name); } @Override public int hashCode() { return name.hashCode(); } @Override public int compareTo(Dimension other) { return this.name.compareTo(other.name); } public static Dimension indexed(String name, long size) { return new IndexedBoundDimension(name, size); } public static Dimension indexed(String name) { return new IndexedUnboundDimension(name); } public static Dimension mapped(String name) { return new MappedDimension(name); } static private String requireIdentifier(String name) { if (name == null) throw new IllegalArgumentException("A dimension name cannot be null"); if ( ! 
TensorType.labelMatcher.matches(name)) throw new IllegalArgumentException("A dimension name must be an identifier or integer, not '" + name + "'"); return name; } } public static class IndexedBoundDimension extends TensorType.Dimension { private final Long size; private IndexedBoundDimension(String name, long size) { super(name); if (size < 1) throw new IllegalArgumentException("Size of bound dimension '" + name + "' must be at least 1"); if (size > Integer.MAX_VALUE) throw new IllegalArgumentException("Size of bound dimension '" + name + "' cannot be larger than " + Integer.MAX_VALUE); this.size = size; } @Override public Optional<Long> size() { return Optional.of(size); } @Override public Type type() { return Type.indexedBound; } @Override public IndexedBoundDimension withName(String name) { return new IndexedBoundDimension(name, size); } @Override public String toString() { return name() + "[" + size + "]"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; IndexedBoundDimension that = (IndexedBoundDimension) o; if (!size.equals(that.size)) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + size.hashCode(); return result; } } public static class IndexedUnboundDimension extends TensorType.Dimension { private IndexedUnboundDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.indexedUnbound; } @Override public IndexedUnboundDimension withName(String name) { return new IndexedUnboundDimension(name); } @Override public String toString() { return name() + "[]"; } } public static class MappedDimension extends TensorType.Dimension { private MappedDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return 
Type.mapped; } @Override public MappedDimension withName(String name) { return new MappedDimension(name); } @Override public String toString() { return name() + "{}"; } } public static class Builder { private final Map<String, Dimension> dimensions = new LinkedHashMap<>(); private final Value valueType; /** Creates an empty builder with cells of type double */ public Builder() { this(Value.DOUBLE); } public Builder(Value valueType) { this.valueType = valueType; } /** * Creates a builder containing a combination of the dimensions of the given types * * If the same dimension is indexed with different size restrictions the smallest size will be used. * If it is size restricted in one argument but not the other it will not be size restricted. * If it is indexed in one and mapped in the other it will become mapped. * * The value type will be the largest of the value types of the input types */ public Builder(TensorType ... types) { this(true, types); } public Builder(boolean allowDifferentSizes, TensorType ... 
types) { this.valueType = TensorType.combinedValueType(types); for (TensorType type : types) addDimensionsOf(type, allowDifferentSizes); } /** Creates a builder from the given dimensions, having double as the value type */ public Builder(Iterable<Dimension> dimensions) { this(Value.DOUBLE, dimensions); } /** Creates a builder from the given value type and dimensions */ public Builder(Value valueType, Iterable<Dimension> dimensions) { this.valueType = valueType; for (TensorType.Dimension dimension : dimensions) { dimension(dimension); } } private void addDimensionsOf(TensorType type, boolean allowDifferentSizes) { for (Dimension dimension : type.dimensions) { set(dimension.combineWith(Optional.ofNullable(dimensions.get(dimension.name())), allowDifferentSizes)); } } /** Returns the current number of dimensions in this */ public int rank() { return dimensions.size(); } /** * Adds a new dimension to this * * @throws IllegalArgumentException if the dimension is already present */ private Builder add(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); if (dimensions.containsKey(dimension.name())) throw new IllegalArgumentException("Could not add dimension " + dimension + " as this dimension " + "is already present"); dimensions.put(dimension.name(), dimension); return this; } /** Adds or replaces a dimension in this */ public Builder set(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); dimensions.put(dimension.name(), dimension); return this; } /** * Adds a bound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name, long size) { return add(new IndexedBoundDimension(name, size)); } /** * Adds an unbound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name) { return add(new IndexedUnboundDimension(name)); } /** * Adds a mapped 
dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder mapped(String name) { return add(new MappedDimension(name)); } /** Adds the given dimension */ public Builder dimension(Dimension dimension) { return add(dimension); } /** Returns the given dimension, or empty if none is present */ public Optional<Dimension> getDimension(String dimension) { return Optional.ofNullable(dimensions.get(dimension)); } public Builder dimension(String name, Dimension.Type type) { switch (type) { case mapped : mapped(name); break; case indexedUnbound : indexed(name); break; default : throw new IllegalArgumentException("This can not create a dimension of type " + type); } return this; } public TensorType build() { return new TensorType(valueType, dimensions.values()); } } }
class TensorType { static Ascii7BitMatcher labelMatcher = new Ascii7BitMatcher("-_@" + charsAndNumbers(), "_@$" + charsAndNumbers()); /** The permissible cell value types. Default is double. */ public enum Value { DOUBLE("double"), FLOAT("float"), INT8("int8"), BFLOAT16("bfloat16"); private final String id; Value(String id) { this.id = id; } public String id() { return id; } public boolean isEqualOrLargerThan(TensorType.Value other) { return this == other || largestOf(this, other) == this; } public static Value largestOf(List<Value> values) { if (values.isEmpty()) return Value.DOUBLE; Value largest = null; for (Value value : values) { if (largest == null) largest = value; else largest = largestOf(largest, value); } return largest; } public static Value largestOf(Value value1, Value value2) { if (value1 == DOUBLE || value2 == DOUBLE) return DOUBLE; if (value1 == FLOAT || value2 == FLOAT) return FLOAT; if (value1 == BFLOAT16 || value2 == BFLOAT16) return BFLOAT16; return INT8; } @Override public String toString() { return name().toLowerCase(); } }; /** The empty tensor type - which is the same as a double */ public static final TensorType empty = new TensorType(Value.DOUBLE, Collections.emptyList()); private final Value valueType; /** Sorted list of the dimensions of this */ private final ImmutableList<Dimension> dimensions; private final TensorType mappedSubtype; public TensorType(Value valueType, Collection<Dimension> dimensions) { this.valueType = valueType; List<Dimension> dimensionList = new ArrayList<>(dimensions); Collections.sort(dimensionList); this.dimensions = ImmutableList.copyOf(dimensionList); if (dimensionList.stream().allMatch(d -> d.isIndexed())) mappedSubtype = empty; else if (dimensionList.stream().noneMatch(d -> d.isIndexed())) mappedSubtype = this; else mappedSubtype = new TensorType(valueType, dimensions.stream().filter(d -> ! d.isIndexed()).collect(Collectors.toList())); } static public Value combinedValueType(TensorType ... 
types) { List<Value> valueTypes = new ArrayList<>(); for (TensorType type : types) { if (type.rank() > 0) { valueTypes.add(type.valueType()); } } return Value.largestOf(valueTypes); } /** * Returns a tensor type instance from a * <a href="https: * <code>tensor(dimension1, dimension2, ...)</code> * where each dimension is either * <ul> * <li><code>dimension-name[]</code> - an unbound indexed dimension * <li><code>dimension-name[int]</code> - an bound indexed dimension * <li><code>dimension-name{}</code> - a mapped dimension * </ul> * Example: <code>tensor(x[10],y[20])</code> (a matrix) */ public static TensorType fromSpec(String specString) { return TensorTypeParser.fromSpec(specString); } /** Returns the numeric type of the cell values of this */ public Value valueType() { return valueType; } /** The type representing the mapped subset of dimensions of this. */ public TensorType mappedSubtype() { return mappedSubtype; } /** Returns the number of dimensions of this: dimensions().size() */ public int rank() { return dimensions.size(); } /** Returns an immutable list of the dimensions of this */ public List<Dimension> dimensions() { return dimensions; } /** Returns an immutable set of the names of the dimensions of this */ public Set<String> dimensionNames() { return dimensions.stream().map(Dimension::name).collect(Collectors.toSet()); } /** Returns the dimension with this name, or empty if not present */ public Optional<Dimension> dimension(String name) { return indexOfDimension(name).map(i -> dimensions.get(i)); } /** Returns the 0-base index of this dimension, or empty if it is not present */ public Optional<Integer> indexOfDimension(String dimension) { for (int i = 0; i < dimensions.size(); i++) if (dimensions.get(i).name().equals(dimension)) return Optional.of(i); return Optional.empty(); } /* Returns the bound of this dimension if it is present and bound in this, empty otherwise */ public Optional<Long> sizeOfDimension(String dimension) { Optional<Dimension> d = 
dimension(dimension); if ( ! d.isPresent()) return Optional.empty(); return d.get().size(); } /** * Returns whether this type can be assigned to the given type, * i.e if the given type is a generalization of this type. */ public boolean isAssignableTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, false, true); } /** * Returns whether this type can be converted to the given type. * This is true if this type isAssignableTo the given type or * if it is not assignable only because it has a shorter dimension length * than the given type in some shared dimension(s), as it can then be * converted to the given type by zero padding. */ public boolean isConvertibleTo(TensorType generalization) { return isConvertibleOrAssignableTo(generalization, true, true); } /** * Returns whether or not this type can simply be renamed to * the given type. This is the same as being assignable, but disregarding * dimension names. */ public boolean isRenamableTo(TensorType other) { return isConvertibleOrAssignableTo(other, false, false); } private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible, boolean considerName) { if ( ! generalization.valueType().isEqualOrLargerThan(this.valueType) ) return false; if (generalization.dimensions().size() != this.dimensions().size()) return false; for (int i = 0; i < generalization.dimensions().size(); i++) { Dimension thisDimension = this.dimensions().get(i); Dimension generalizationDimension = generalization.dimensions().get(i); if (thisDimension.isIndexed() != generalizationDimension.isIndexed()) return false; if (considerName && ! thisDimension.name().equals(generalizationDimension.name())) return false; if (generalizationDimension.size().isPresent()) { if ( ! 
thisDimension.size().isPresent()) return false; if (convertible) { if (thisDimension.size().get() > generalizationDimension.size().get()) return false; } else { if (!thisDimension.size().get().equals(generalizationDimension.size().get())) return false; } } } return true; } @Override public String toString() { return "tensor" + (valueType == Value.DOUBLE ? "" : "<" + valueType.id() + ">") + "(" + dimensions.stream().map(Dimension::toString).collect(Collectors.joining(",")) + ")"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TensorType other = (TensorType)o; if ( (this.rank() == 0) && (other.rank() == 0)) return true; if ( this.valueType != other.valueType) return false; if ( ! this.dimensions.equals(other.dimensions)) return false; return true; } /** Returns whether the given type has the same dimension names as this */ public boolean mathematicallyEquals(TensorType other) { if (dimensions().size() != other.dimensions().size()) return false; for (int i = 0; i < dimensions().size(); i++) if (!dimensions().get(i).name().equals(other.dimensions().get(i).name())) return false; return true; } /** * Returns the dimensionwise generalization of this and the given type, or empty if no generalization exists. * A dimensionwise generalization exists if the two tensors share the same dimensions, and each dimension * is compatible. * For example, the dimensionwise generalization of tensor(x[],y[5]) and tensor(x[5],y[]) is tensor(x[],y[]) */ public Optional<TensorType> dimensionwiseGeneralizationWith(TensorType other) { if (this.equals(other)) return Optional.of(this); if (this.dimensions.size() != other.dimensions.size()) return Optional.empty(); Builder b = new Builder(TensorType.Value.largestOf(valueType, other.valueType)); for (int i = 0; i < dimensions.size(); i++) { Dimension thisDim = this.dimensions().get(i); Dimension otherDim = other.dimensions().get(i); if ( ! 
thisDim.name().equals(otherDim.name())) return Optional.empty(); if (thisDim.isIndexed() && otherDim.isIndexed()) { if (thisDim.size().isPresent() && otherDim.size().isPresent()) { if ( ! thisDim.size().get().equals(otherDim.size().get())) return Optional.empty(); b.dimension(thisDim); } else if (thisDim.size().isPresent()) { b.dimension(otherDim); } else if (otherDim.size().isPresent()) { b.dimension(thisDim); } else { b.dimension(thisDim); } } else if ( ! thisDim.isIndexed() && ! otherDim.isIndexed()) { b.dimension(thisDim); } else { return Optional.empty(); } } return Optional.of(b.build()); } @Override public int hashCode() { return Objects.hash(dimensions, valueType); } /** * A tensor dimension. * Dimensions have the natural order of their names. */ public static abstract class Dimension implements Comparable<Dimension> { public enum Type { indexedBound, indexedUnbound, mapped } private final String name; private Dimension(String name) { this.name = requireIdentifier(name); } public final String name() { return name; } /** Returns the size of this dimension if it is bound, empty otherwise */ public abstract Optional<Long> size(); public abstract Type type(); /** Returns a copy of this with the name set to the given name */ public abstract Dimension withName(String name); /** Returns true if this is an indexed bound or unbound type */ public boolean isIndexed() { return type() == Type.indexedBound || type() == Type.indexedUnbound; } /** Returns true if this is of the mapped type */ public boolean isMapped() { return type() == Type.mapped; } /** * Returns the dimension resulting from combining two dimensions having the same name but possibly different * types: * * [N] + [M] = [ min(N, M) ] * [N] + [] = [] * [] + {} = {} */ Dimension combineWith(Optional<Dimension> other, boolean allowDifferentSizes) { if ( ! 
other.isPresent()) return this; if (this instanceof MappedDimension) return this; if (other.get() instanceof MappedDimension) return other.get(); if (this instanceof IndexedUnboundDimension) return this; if (other.get() instanceof IndexedUnboundDimension) return other.get(); IndexedBoundDimension thisIb = (IndexedBoundDimension)this; IndexedBoundDimension otherIb = (IndexedBoundDimension)other.get(); if (allowDifferentSizes) return thisIb.size().get() < otherIb.size().get() ? thisIb : otherIb; if ( ! thisIb.size().equals(otherIb.size())) throw new IllegalArgumentException("Unequal dimension sizes in " + thisIb + " and " + otherIb); return thisIb; } @Override public abstract String toString(); @Override public boolean equals(Object other) { if (this == other) return true; if (other == null || getClass() != other.getClass()) return false; return name.equals(((Dimension)other).name); } @Override public int hashCode() { return name.hashCode(); } @Override public int compareTo(Dimension other) { return this.name.compareTo(other.name); } public static Dimension indexed(String name, long size) { return new IndexedBoundDimension(name, size); } public static Dimension indexed(String name) { return new IndexedUnboundDimension(name); } public static Dimension mapped(String name) { return new MappedDimension(name); } static private String requireIdentifier(String name) { if (name == null) throw new IllegalArgumentException("A dimension name cannot be null"); if ( ! 
TensorType.labelMatcher.matches(name)) throw new IllegalArgumentException("A dimension name must be an identifier or integer, not '" + name + "'"); return name; } } public static class IndexedBoundDimension extends TensorType.Dimension { private final Long size; private IndexedBoundDimension(String name, long size) { super(name); if (size < 1) throw new IllegalArgumentException("Size of bound dimension '" + name + "' must be at least 1"); if (size > Integer.MAX_VALUE) throw new IllegalArgumentException("Size of bound dimension '" + name + "' cannot be larger than " + Integer.MAX_VALUE); this.size = size; } @Override public Optional<Long> size() { return Optional.of(size); } @Override public Type type() { return Type.indexedBound; } @Override public IndexedBoundDimension withName(String name) { return new IndexedBoundDimension(name, size); } @Override public String toString() { return name() + "[" + size + "]"; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; IndexedBoundDimension that = (IndexedBoundDimension) o; if (!size.equals(that.size)) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + size.hashCode(); return result; } } public static class IndexedUnboundDimension extends TensorType.Dimension { private IndexedUnboundDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return Type.indexedUnbound; } @Override public IndexedUnboundDimension withName(String name) { return new IndexedUnboundDimension(name); } @Override public String toString() { return name() + "[]"; } } public static class MappedDimension extends TensorType.Dimension { private MappedDimension(String name) { super(name); } @Override public Optional<Long> size() { return Optional.empty(); } @Override public Type type() { return 
Type.mapped; } @Override public MappedDimension withName(String name) { return new MappedDimension(name); } @Override public String toString() { return name() + "{}"; } } public static class Builder { private final Map<String, Dimension> dimensions = new LinkedHashMap<>(); private final Value valueType; /** Creates an empty builder with cells of type double */ public Builder() { this(Value.DOUBLE); } public Builder(Value valueType) { this.valueType = valueType; } /** * Creates a builder containing a combination of the dimensions of the given types * * If the same dimension is indexed with different size restrictions the smallest size will be used. * If it is size restricted in one argument but not the other it will not be size restricted. * If it is indexed in one and mapped in the other it will become mapped. * * The value type will be the largest of the value types of the input types */ public Builder(TensorType ... types) { this(true, types); } public Builder(boolean allowDifferentSizes, TensorType ... 
types) { this.valueType = TensorType.combinedValueType(types); for (TensorType type : types) addDimensionsOf(type, allowDifferentSizes); } /** Creates a builder from the given dimensions, having double as the value type */ public Builder(Iterable<Dimension> dimensions) { this(Value.DOUBLE, dimensions); } /** Creates a builder from the given value type and dimensions */ public Builder(Value valueType, Iterable<Dimension> dimensions) { this.valueType = valueType; for (TensorType.Dimension dimension : dimensions) { dimension(dimension); } } private void addDimensionsOf(TensorType type, boolean allowDifferentSizes) { for (Dimension dimension : type.dimensions) { set(dimension.combineWith(Optional.ofNullable(dimensions.get(dimension.name())), allowDifferentSizes)); } } /** Returns the current number of dimensions in this */ public int rank() { return dimensions.size(); } /** * Adds a new dimension to this * * @throws IllegalArgumentException if the dimension is already present */ private Builder add(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); if (dimensions.containsKey(dimension.name())) throw new IllegalArgumentException("Could not add dimension " + dimension + " as this dimension " + "is already present"); dimensions.put(dimension.name(), dimension); return this; } /** Adds or replaces a dimension in this */ public Builder set(Dimension dimension) { Objects.requireNonNull(dimension, "A dimension cannot be null"); dimensions.put(dimension.name(), dimension); return this; } /** * Adds a bound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name, long size) { return add(new IndexedBoundDimension(name, size)); } /** * Adds an unbound indexed dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder indexed(String name) { return add(new IndexedUnboundDimension(name)); } /** * Adds a mapped 
dimension to this * * @throws IllegalArgumentException if the dimension is already present */ public Builder mapped(String name) { return add(new MappedDimension(name)); } /** Adds the given dimension */ public Builder dimension(Dimension dimension) { return add(dimension); } /** Returns the given dimension, or empty if none is present */ public Optional<Dimension> getDimension(String dimension) { return Optional.ofNullable(dimensions.get(dimension)); } public Builder dimension(String name, Dimension.Type type) { switch (type) { case mapped : mapped(name); break; case indexedUnbound : indexed(name); break; default : throw new IllegalArgumentException("This can not create a dimension of type " + type); } return this; } public TensorType build() { return new TensorType(valueType, dimensions.values()); } } }
Thanks, fixed.
private void encodeBFloat16Cells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putShort((short)(Float.floatToRawIntBits(tensor.getFloat(i)) >>> 16)); }
buffer.putShort((short)(Float.floatToRawIntBits(tensor.getFloat(i)) >>> 16));
private void encodeBFloat16Cells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putShort(TypedBinaryFormat.bFloat16BitsFromFloat(tensor.getFloat(i))); }
class DenseBinaryFormat implements BinaryFormat { private final TensorType.Value serializationValueType; DenseBinaryFormat() { this(TensorType.Value.DOUBLE); } DenseBinaryFormat(TensorType.Value serializationValueType) { this.serializationValueType = serializationValueType; } @Override public void encode(GrowableByteBuffer buffer, Tensor tensor) { if ( ! ( tensor instanceof IndexedTensor)) throw new RuntimeException("The dense format is only supported for indexed tensors"); encodeDimensions(buffer, (IndexedTensor)tensor); encodeCells(buffer, (IndexedTensor)tensor); } private void encodeDimensions(GrowableByteBuffer buffer, IndexedTensor tensor) { buffer.putInt1_4Bytes(tensor.type().dimensions().size()); for (int i = 0; i < tensor.type().dimensions().size(); i++) { buffer.putUtf8String(tensor.type().dimensions().get(i).name()); buffer.putInt1_4Bytes((int)tensor.dimensionSizes().size(i)); } } private void encodeCells(GrowableByteBuffer buffer, IndexedTensor tensor) { switch (serializationValueType) { case DOUBLE: encodeDoubleCells(tensor, buffer); break; case FLOAT: encodeFloatCells(tensor, buffer); break; case BFLOAT16: encodeBFloat16Cells(tensor, buffer); break; case INT8: encodeInt8Cells(tensor, buffer); break; } } private void encodeDoubleCells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putDouble(tensor.get(i)); } private void encodeFloatCells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putFloat(tensor.getFloat(i)); } private void encodeInt8Cells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.put((byte) tensor.getFloat(i)); } @Override public Tensor decode(Optional<TensorType> optionalType, GrowableByteBuffer buffer) { TensorType type; DimensionSizes sizes; if (optionalType.isPresent()) { type = optionalType.get(); if (type.valueType() != this.serializationValueType) { throw new 
IllegalArgumentException("Tensor value type mismatch. Value type " + type.valueType() + " is not " + this.serializationValueType); } TensorType serializedType = decodeType(buffer); if ( ! serializedType.isAssignableTo(type)) throw new IllegalArgumentException("Type/instance mismatch: A tensor of type " + serializedType + " cannot be assigned to type " + type); sizes = sizesFromType(serializedType); } else { type = decodeType(buffer); sizes = sizesFromType(type); } Tensor.Builder builder = Tensor.Builder.of(type, sizes); decodeCells(sizes, buffer, (IndexedTensor.BoundBuilder)builder); return builder.build(); } private TensorType decodeType(GrowableByteBuffer buffer) { TensorType.Builder builder = new TensorType.Builder(serializationValueType); int dimensionCount = buffer.getInt1_4Bytes(); for (int i = 0; i < dimensionCount; i++) builder.indexed(buffer.getUtf8String(), buffer.getInt1_4Bytes()); return builder.build(); } /** Returns dimension sizes from a type consisting of fully specified, indexed dimensions only */ private DimensionSizes sizesFromType(TensorType type) { DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size()); for (int i = 0; i < type.dimensions().size(); i++) builder.set(i, type.dimensions().get(i).size().get()); return builder.build(); } private void decodeCells(DimensionSizes sizes, GrowableByteBuffer buffer, IndexedTensor.BoundBuilder builder) { switch (serializationValueType) { case DOUBLE: decodeDoubleCells(sizes, builder, buffer); break; case FLOAT: decodeFloatCells(sizes, builder, buffer); break; case BFLOAT16: decodeBFloat16Cells(sizes, builder, buffer); break; case INT8: decodeInt8Cells(sizes, builder, buffer); break; } } private void decodeDoubleCells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) builder.cellByDirectIndex(i, buffer.getDouble()); } private void decodeFloatCells(DimensionSizes sizes, IndexedTensor.BoundBuilder 
builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) builder.cellByDirectIndex(i, buffer.getFloat()); } private void decodeBFloat16Cells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) { builder.cellByDirectIndex(i, Float.intBitsToFloat(buffer.getShort() << 16)); } } private void decodeInt8Cells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) { builder.cellByDirectIndex(i, (float) buffer.get()); } } }
class DenseBinaryFormat implements BinaryFormat { private final TensorType.Value serializationValueType; DenseBinaryFormat() { this(TensorType.Value.DOUBLE); } DenseBinaryFormat(TensorType.Value serializationValueType) { this.serializationValueType = serializationValueType; } @Override public void encode(GrowableByteBuffer buffer, Tensor tensor) { if ( ! ( tensor instanceof IndexedTensor)) throw new RuntimeException("The dense format is only supported for indexed tensors"); encodeDimensions(buffer, (IndexedTensor)tensor); encodeCells(buffer, (IndexedTensor)tensor); } private void encodeDimensions(GrowableByteBuffer buffer, IndexedTensor tensor) { buffer.putInt1_4Bytes(tensor.type().dimensions().size()); for (int i = 0; i < tensor.type().dimensions().size(); i++) { buffer.putUtf8String(tensor.type().dimensions().get(i).name()); buffer.putInt1_4Bytes((int)tensor.dimensionSizes().size(i)); } } private void encodeCells(GrowableByteBuffer buffer, IndexedTensor tensor) { switch (serializationValueType) { case DOUBLE: encodeDoubleCells(tensor, buffer); break; case FLOAT: encodeFloatCells(tensor, buffer); break; case BFLOAT16: encodeBFloat16Cells(tensor, buffer); break; case INT8: encodeInt8Cells(tensor, buffer); break; } } private void encodeDoubleCells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putDouble(tensor.get(i)); } private void encodeFloatCells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.putFloat(tensor.getFloat(i)); } private void encodeInt8Cells(IndexedTensor tensor, GrowableByteBuffer buffer) { for (int i = 0; i < tensor.size(); i++) buffer.put((byte) tensor.getFloat(i)); } @Override public Tensor decode(Optional<TensorType> optionalType, GrowableByteBuffer buffer) { TensorType type; DimensionSizes sizes; if (optionalType.isPresent()) { type = optionalType.get(); if (type.valueType() != this.serializationValueType) { throw new 
IllegalArgumentException("Tensor value type mismatch. Value type " + type.valueType() + " is not " + this.serializationValueType); } TensorType serializedType = decodeType(buffer); if ( ! serializedType.isAssignableTo(type)) throw new IllegalArgumentException("Type/instance mismatch: A tensor of type " + serializedType + " cannot be assigned to type " + type); sizes = sizesFromType(serializedType); } else { type = decodeType(buffer); sizes = sizesFromType(type); } Tensor.Builder builder = Tensor.Builder.of(type, sizes); decodeCells(sizes, buffer, (IndexedTensor.BoundBuilder)builder); return builder.build(); } private TensorType decodeType(GrowableByteBuffer buffer) { TensorType.Builder builder = new TensorType.Builder(serializationValueType); int dimensionCount = buffer.getInt1_4Bytes(); for (int i = 0; i < dimensionCount; i++) builder.indexed(buffer.getUtf8String(), buffer.getInt1_4Bytes()); return builder.build(); } /** Returns dimension sizes from a type consisting of fully specified, indexed dimensions only */ private DimensionSizes sizesFromType(TensorType type) { DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size()); for (int i = 0; i < type.dimensions().size(); i++) builder.set(i, type.dimensions().get(i).size().get()); return builder.build(); } private void decodeCells(DimensionSizes sizes, GrowableByteBuffer buffer, IndexedTensor.BoundBuilder builder) { switch (serializationValueType) { case DOUBLE: decodeDoubleCells(sizes, builder, buffer); break; case FLOAT: decodeFloatCells(sizes, builder, buffer); break; case BFLOAT16: decodeBFloat16Cells(sizes, builder, buffer); break; case INT8: decodeInt8Cells(sizes, builder, buffer); break; } } private void decodeDoubleCells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) builder.cellByDirectIndex(i, buffer.getDouble()); } private void decodeFloatCells(DimensionSizes sizes, IndexedTensor.BoundBuilder 
builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) builder.cellByDirectIndex(i, buffer.getFloat()); } private void decodeBFloat16Cells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) { builder.cellByDirectIndex(i, TypedBinaryFormat.floatFromBFloat16Bits(buffer.getShort())); } } private void decodeInt8Cells(DimensionSizes sizes, IndexedTensor.BoundBuilder builder, GrowableByteBuffer buffer) { for (long i = 0; i < sizes.totalSize(); i++) { builder.cellByDirectIndex(i, (float) buffer.get()); } } }
We want at least the `system` dimension here. But ideally i think we should report per zone with `zone` dimension and we can aggregate in yamas for entire system if we want.
protected boolean maintain() { metric.add(bucketCountMetricName, zoneRegistry.zones().all().ids().stream() .map(archiveBucketDb::buckets) .mapToLong(Collection::size) .sum(), metric.createContext(Map.of())); var tenantArchiveAccessRoles = controller().tenants().asList().stream() .filter(t -> t instanceof CloudTenant) .map(t -> (CloudTenant) t) .filter(t -> t.archiveAccessRole().isPresent()) .collect(Collectors.toUnmodifiableMap( Tenant::name, cloudTenant -> cloudTenant.archiveAccessRole().orElseThrow())); zoneRegistry.zones().controllerUpgraded().ids().forEach(zoneId -> archiveBucketDb.buckets(zoneId).forEach(archiveBucket -> archiveService.updateBucketAndKeyPolicy(zoneId, archiveBucket, Maps.filterEntries(tenantArchiveAccessRoles, entry -> archiveBucket.tenants().contains(entry.getKey()))) ) ); return true; }
metric.createContext(Map.of()));
protected boolean maintain() { zoneRegistry.zones().all().ids().forEach(zoneId -> metric.set(bucketCountMetricName, archiveBucketDb.buckets(zoneId).size(), metric.createContext(Map.of("zone", zoneId.value())))); var tenantArchiveAccessRoles = controller().tenants().asList().stream() .filter(t -> t instanceof CloudTenant) .map(t -> (CloudTenant) t) .filter(t -> t.archiveAccessRole().isPresent()) .collect(Collectors.toUnmodifiableMap( Tenant::name, cloudTenant -> cloudTenant.archiveAccessRole().orElseThrow())); zoneRegistry.zones().controllerUpgraded().ids().forEach(zoneId -> archiveBucketDb.buckets(zoneId).forEach(archiveBucket -> archiveService.updateBucketAndKeyPolicy(zoneId, archiveBucket, Maps.filterEntries(tenantArchiveAccessRoles, entry -> archiveBucket.tenants().contains(entry.getKey()))) ) ); return true; }
class ArchiveAccessMaintainer extends ControllerMaintainer { private static final String bucketCountMetricName = "archive.bucketCount"; private final ArchiveBucketDb archiveBucketDb; private final ArchiveService archiveService; private final ZoneRegistry zoneRegistry; private final Metric metric; public ArchiveAccessMaintainer(Controller controller, Metric metric, Duration interval) { super(controller, interval); this.archiveBucketDb = controller.archiveBucketDb(); this.archiveService = controller.serviceRegistry().archiveService(); this.zoneRegistry = controller().zoneRegistry(); this.metric = metric; } @Override }
class ArchiveAccessMaintainer extends ControllerMaintainer { private static final String bucketCountMetricName = "archive.bucketCount"; private final ArchiveBucketDb archiveBucketDb; private final ArchiveService archiveService; private final ZoneRegistry zoneRegistry; private final Metric metric; public ArchiveAccessMaintainer(Controller controller, Metric metric, Duration interval) { super(controller, interval); this.archiveBucketDb = controller.archiveBucketDb(); this.archiveService = controller.serviceRegistry().archiveService(); this.zoneRegistry = controller().zoneRegistry(); this.metric = metric; } @Override }
This should copy config, not defaultRequestConfig.
public RequestBuilder timeout(Duration timeout) { return config(RequestConfig.copy(defaultRequestConfig) .setResponseTimeout(timeout.toMillis(), TimeUnit.MILLISECONDS) .build()); }
return config(RequestConfig.copy(defaultRequestConfig)
public RequestBuilder timeout(Duration timeout) { return config(RequestConfig.copy(defaultRequestConfig) .setResponseTimeout(timeout.toMillis(), TimeUnit.MILLISECONDS) .build()); }
class RequestBuilder implements ConfigServerClient.RequestBuilder { private final Method method; private final HostStrategy hosts; private final URIBuilder uriBuilder = new URIBuilder(); private HttpEntity entity; private RequestConfig config = defaultRequestConfig; private RequestBuilder(HostStrategy hosts, Method method) { if ( ! hosts.iterator().hasNext()) throw new IllegalArgumentException("Host strategy cannot be empty"); this.hosts = hosts; this.method = requireNonNull(method); } @Override public RequestBuilder at(String... pathSegments) { uriBuilder.setPathSegments(requireNonNull(pathSegments)); return this; } @Override public ConfigServerClient.RequestBuilder body(byte[] json) { return body(HttpEntities.create(json, ContentType.APPLICATION_JSON)); } @Override public RequestBuilder body(HttpEntity entity) { this.entity = requireNonNull(entity); return this; } @Override public RequestBuilder parameters(String... pairs) { if (pairs.length % 2 != 0) throw new IllegalArgumentException("Must supply parameter key/values in pairs"); for (int i = 0; i < pairs.length; ) uriBuilder.setParameter(pairs[i++], pairs[i++]); return this; } @Override @Override public RequestBuilder config(RequestConfig config) { this.config = requireNonNull(config); return this; } @Override public <T> T handle(BiFunction<ClassicHttpResponse, IOException, T> handler) throws UncheckedIOException { return execute(this, requireNonNull(handler)); } @Override public <T> T read(Function<byte[], T> mapper) throws UncheckedIOException, ConfigServerException { return mapIfSuccess(input -> { try (input) { return mapper.apply(input.readAllBytes()); } catch (IOException e) { throw new RetryException(e); } }); } @Override public void discard() throws UncheckedIOException, ConfigServerException { mapIfSuccess(input -> { try (input) { return null; } catch (IOException e) { throw new RetryException(e); } }); } @Override public InputStream stream() throws UncheckedIOException, ConfigServerException { return 
mapIfSuccess(input -> input); } /** Returns the mapped body, if successful, retrying any IOException. The caller must close the body stream. */ private <T> T mapIfSuccess(Function<InputStream, T> mapper) { return handle((response, ioException) -> { if (response != null) { try { InputStream body = response.getEntity() != null ? response.getEntity().getContent() : InputStream.nullInputStream(); if (response.getCode() >= HttpStatus.SC_REDIRECTION) throw readException(body.readAllBytes()); return mapper.apply(new ForwardingInputStream(body) { @Override public void close() throws IOException { super.close(); response.close(); } }); } catch (IOException | RuntimeException | Error e) { try { response.close(); } catch (IOException f) { e.addSuppressed(f); } if (e instanceof IOException) ioException = (IOException) e; else sneakyThrow(e); } } throw new RetryException(ioException); }); } }
class RequestBuilder implements ConfigServerClient.RequestBuilder { private final Method method; private final HostStrategy hosts; private final URIBuilder uriBuilder = new URIBuilder(); private HttpEntity entity; private RequestConfig config = defaultRequestConfig; private RequestBuilder(HostStrategy hosts, Method method) { if ( ! hosts.iterator().hasNext()) throw new IllegalArgumentException("Host strategy cannot be empty"); this.hosts = hosts; this.method = requireNonNull(method); } @Override public RequestBuilder at(String... pathSegments) { uriBuilder.setPathSegments(requireNonNull(pathSegments)); return this; } @Override public ConfigServerClient.RequestBuilder body(byte[] json) { return body(HttpEntities.create(json, ContentType.APPLICATION_JSON)); } @Override public RequestBuilder body(HttpEntity entity) { this.entity = requireNonNull(entity); return this; } @Override public RequestBuilder parameters(String... pairs) { if (pairs.length % 2 != 0) throw new IllegalArgumentException("Must supply parameter key/values in pairs"); for (int i = 0; i < pairs.length; ) uriBuilder.setParameter(pairs[i++], pairs[i++]); return this; } @Override @Override public RequestBuilder config(RequestConfig config) { this.config = requireNonNull(config); return this; } @Override public <T> T handle(BiFunction<ClassicHttpResponse, IOException, T> handler) throws UncheckedIOException { return execute(this, requireNonNull(handler)); } @Override public <T> T read(Function<byte[], T> mapper) throws UncheckedIOException, ConfigServerException { return mapIfSuccess(input -> { try (input) { return mapper.apply(input.readAllBytes()); } catch (IOException e) { throw new RetryException(e); } }); } @Override public void discard() throws UncheckedIOException, ConfigServerException { mapIfSuccess(input -> { try (input) { return null; } catch (IOException e) { throw new RetryException(e); } }); } @Override public InputStream stream() throws UncheckedIOException, ConfigServerException { return 
mapIfSuccess(input -> input); } /** Returns the mapped body, if successful, retrying any IOException. The caller must close the body stream. */ private <T> T mapIfSuccess(Function<InputStream, T> mapper) { return handle((response, ioException) -> { if (response != null) { try { InputStream body = response.getEntity() != null ? response.getEntity().getContent() : InputStream.nullInputStream(); if (response.getCode() >= HttpStatus.SC_REDIRECTION) throw readException(body.readAllBytes()); return mapper.apply(new ForwardingInputStream(body) { @Override public void close() throws IOException { super.close(); response.close(); } }); } catch (IOException | RuntimeException | Error e) { try { response.close(); } catch (IOException f) { e.addSuppressed(f); } if (e instanceof IOException) ioException = (IOException) e; else sneakyThrow(e); } } throw new RetryException(ioException); }); } }
Thanks.
public RequestBuilder timeout(Duration timeout) { return config(RequestConfig.copy(defaultRequestConfig) .setResponseTimeout(timeout.toMillis(), TimeUnit.MILLISECONDS) .build()); }
return config(RequestConfig.copy(defaultRequestConfig)
public RequestBuilder timeout(Duration timeout) { return config(RequestConfig.copy(defaultRequestConfig) .setResponseTimeout(timeout.toMillis(), TimeUnit.MILLISECONDS) .build()); }
class RequestBuilder implements ConfigServerClient.RequestBuilder { private final Method method; private final HostStrategy hosts; private final URIBuilder uriBuilder = new URIBuilder(); private HttpEntity entity; private RequestConfig config = defaultRequestConfig; private RequestBuilder(HostStrategy hosts, Method method) { if ( ! hosts.iterator().hasNext()) throw new IllegalArgumentException("Host strategy cannot be empty"); this.hosts = hosts; this.method = requireNonNull(method); } @Override public RequestBuilder at(String... pathSegments) { uriBuilder.setPathSegments(requireNonNull(pathSegments)); return this; } @Override public ConfigServerClient.RequestBuilder body(byte[] json) { return body(HttpEntities.create(json, ContentType.APPLICATION_JSON)); } @Override public RequestBuilder body(HttpEntity entity) { this.entity = requireNonNull(entity); return this; } @Override public RequestBuilder parameters(String... pairs) { if (pairs.length % 2 != 0) throw new IllegalArgumentException("Must supply parameter key/values in pairs"); for (int i = 0; i < pairs.length; ) uriBuilder.setParameter(pairs[i++], pairs[i++]); return this; } @Override @Override public RequestBuilder config(RequestConfig config) { this.config = requireNonNull(config); return this; } @Override public <T> T handle(BiFunction<ClassicHttpResponse, IOException, T> handler) throws UncheckedIOException { return execute(this, requireNonNull(handler)); } @Override public <T> T read(Function<byte[], T> mapper) throws UncheckedIOException, ConfigServerException { return mapIfSuccess(input -> { try (input) { return mapper.apply(input.readAllBytes()); } catch (IOException e) { throw new RetryException(e); } }); } @Override public void discard() throws UncheckedIOException, ConfigServerException { mapIfSuccess(input -> { try (input) { return null; } catch (IOException e) { throw new RetryException(e); } }); } @Override public InputStream stream() throws UncheckedIOException, ConfigServerException { return 
mapIfSuccess(input -> input); } /** Returns the mapped body, if successful, retrying any IOException. The caller must close the body stream. */ private <T> T mapIfSuccess(Function<InputStream, T> mapper) { return handle((response, ioException) -> { if (response != null) { try { InputStream body = response.getEntity() != null ? response.getEntity().getContent() : InputStream.nullInputStream(); if (response.getCode() >= HttpStatus.SC_REDIRECTION) throw readException(body.readAllBytes()); return mapper.apply(new ForwardingInputStream(body) { @Override public void close() throws IOException { super.close(); response.close(); } }); } catch (IOException | RuntimeException | Error e) { try { response.close(); } catch (IOException f) { e.addSuppressed(f); } if (e instanceof IOException) ioException = (IOException) e; else sneakyThrow(e); } } throw new RetryException(ioException); }); } }
class RequestBuilder implements ConfigServerClient.RequestBuilder { private final Method method; private final HostStrategy hosts; private final URIBuilder uriBuilder = new URIBuilder(); private HttpEntity entity; private RequestConfig config = defaultRequestConfig; private RequestBuilder(HostStrategy hosts, Method method) { if ( ! hosts.iterator().hasNext()) throw new IllegalArgumentException("Host strategy cannot be empty"); this.hosts = hosts; this.method = requireNonNull(method); } @Override public RequestBuilder at(String... pathSegments) { uriBuilder.setPathSegments(requireNonNull(pathSegments)); return this; } @Override public ConfigServerClient.RequestBuilder body(byte[] json) { return body(HttpEntities.create(json, ContentType.APPLICATION_JSON)); } @Override public RequestBuilder body(HttpEntity entity) { this.entity = requireNonNull(entity); return this; } @Override public RequestBuilder parameters(String... pairs) { if (pairs.length % 2 != 0) throw new IllegalArgumentException("Must supply parameter key/values in pairs"); for (int i = 0; i < pairs.length; ) uriBuilder.setParameter(pairs[i++], pairs[i++]); return this; } @Override @Override public RequestBuilder config(RequestConfig config) { this.config = requireNonNull(config); return this; } @Override public <T> T handle(BiFunction<ClassicHttpResponse, IOException, T> handler) throws UncheckedIOException { return execute(this, requireNonNull(handler)); } @Override public <T> T read(Function<byte[], T> mapper) throws UncheckedIOException, ConfigServerException { return mapIfSuccess(input -> { try (input) { return mapper.apply(input.readAllBytes()); } catch (IOException e) { throw new RetryException(e); } }); } @Override public void discard() throws UncheckedIOException, ConfigServerException { mapIfSuccess(input -> { try (input) { return null; } catch (IOException e) { throw new RetryException(e); } }); } @Override public InputStream stream() throws UncheckedIOException, ConfigServerException { return 
mapIfSuccess(input -> input); } /** Returns the mapped body, if successful, retrying any IOException. The caller must close the body stream. */ private <T> T mapIfSuccess(Function<InputStream, T> mapper) { return handle((response, ioException) -> { if (response != null) { try { InputStream body = response.getEntity() != null ? response.getEntity().getContent() : InputStream.nullInputStream(); if (response.getCode() >= HttpStatus.SC_REDIRECTION) throw readException(body.readAllBytes()); return mapper.apply(new ForwardingInputStream(body) { @Override public void close() throws IOException { super.close(); response.close(); } }); } catch (IOException | RuntimeException | Error e) { try { response.close(); } catch (IOException f) { e.addSuppressed(f); } if (e instanceof IOException) ioException = (IOException) e; else sneakyThrow(e); } } throw new RetryException(ioException); }); } }
This seems a little too complicated to me, why not just have an atomic boolean "closed"? But we still need to check for it everywhere - do we actually have any problems with this?
public void close() { synchronized (clusterTable.writeLock) { CairoEngine myEngine = engine.getAndSet(null); if (myEngine != null) { myEngine.close(); } } }
CairoEngine myEngine = engine.getAndSet(null);
public void close() { if (closed.getAndSet(true)) return; synchronized (nodeTable.writeLock) { synchronized (clusterTable.writeLock) { for (SqlCompiler sqlCompiler : sqlCompilerPool) sqlCompiler.close(); engine.close(); } } }
class QuestMetricsDb extends AbstractComponent implements MetricsDb { private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName()); private final Table nodeTable; private final Table clusterTable; private final Clock clock; private final String dataDir; private final AtomicReference<CairoEngine> engine = new AtomicReference<>(); private final ThreadLocal<SqlCompiler> sqlCompiler; private final AtomicInteger nullRecords = new AtomicInteger(); @Inject public QuestMetricsDb() { this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC()); } public QuestMetricsDb(String dataDir, Clock clock) { this.clock = clock; if (dataDir.startsWith(Defaults.getDefaults().vespaHome()) && ! new File(Defaults.getDefaults().vespaHome()).exists()) dataDir = "data"; String logConfig = dataDir + "/quest-log.conf"; IOUtils.createDirectory(logConfig); IOUtils.writeFile(new File(logConfig), new byte[0]); System.setProperty("out", logConfig); this.dataDir = dataDir; engine.set(new CairoEngine(new DefaultCairoConfiguration(dataDir))); sqlCompiler = ThreadLocal.withInitial(() -> new SqlCompiler(engine.get())); nodeTable = new Table(dataDir, "metrics", clock); clusterTable = new Table(dataDir, "clusterMetrics", clock); ensureTablesExist(); } @Override public Clock clock() { return clock; } @Override public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) { try { addNodeMetricsBody(snapshots); } catch (CairoException e) { if (e.getMessage().contains("Cannot read offset")) { nodeTable.repair(e); addNodeMetricsBody(snapshots); } } } private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) { synchronized (nodeTable.writeLock) { try (TableWriter writer = nodeTable.getWriter()) { for (var snapshot : snapshots) { Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at()); if (atMillis.isEmpty()) continue; TableWriter.Row row = writer.newRow(atMillis.get() * 1000); 
row.putStr(0, snapshot.getFirst()); row.putFloat(2, (float) snapshot.getSecond().load().cpu()); row.putFloat(3, (float) snapshot.getSecond().load().memory()); row.putFloat(4, (float) snapshot.getSecond().load().disk()); row.putLong(5, snapshot.getSecond().generation()); row.putBool(6, snapshot.getSecond().inService()); row.putBool(7, snapshot.getSecond().stable()); row.putFloat(8, (float) snapshot.getSecond().queryRate()); row.append(); } writer.commit(); } } } @Override public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) { try { addClusterMetricsBody(application, snapshots); } catch (CairoException e) { if (e.getMessage().contains("Cannot read offset")) { clusterTable.repair(e); addClusterMetricsBody(application, snapshots); } } } private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) { synchronized (clusterTable.writeLock) { try (TableWriter writer = clusterTable.getWriter()) { for (var snapshot : snapshots.entrySet()) { Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at()); if (atMillis.isEmpty()) continue; TableWriter.Row row = writer.newRow(atMillis.get() * 1000); row.putStr(0, applicationId.serializedForm()); row.putStr(1, snapshot.getKey().value()); row.putFloat(3, (float) snapshot.getValue().queryRate()); row.putFloat(4, (float) snapshot.getValue().writeRate()); row.append(); } writer.commit(); } } } @Override public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) { try { var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext()); return snapshots.entrySet().stream() .map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue())) .collect(Collectors.toList()); } catch (SqlException e) { throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e); } } @Override public ClusterTimeseries 
getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) { try { return getClusterSnapshots(applicationId, clusterId); } catch (SqlException e) { throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e); } } public int getNullRecordsCount() { return nullRecords.get(); } @Override public void gc() { nullRecords.set(0); nodeTable.gc(); clusterTable.gc(); } @Override public void deconstruct() { close(); } @Override private void ensureTablesExist() { if (nodeTable.exists()) ensureNodeTableIsUpdated(); else createNodeTable(); if (clusterTable.exists()) ensureClusterTableIsUpdated(); else createClusterTable(); } private void ensureNodeTableIsUpdated() { try { } catch (Exception e) { nodeTable.repair(e); } } private void ensureClusterTableIsUpdated() { try { if (0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) { } } catch (Exception e) { clusterTable.repair(e); } } private void createNodeTable() { try { issue("create table " + nodeTable.name + " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," + " application_generation long, inService boolean, stable boolean, queries_rate float)" + " timestamp(at)" + "PARTITION BY DAY;", newContext()); } catch (SqlException e) { throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e); } } private void createClusterTable() { try { issue("create table " + clusterTable.name + " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" + " timestamp(at)" + "PARTITION BY DAY;", newContext()); } catch (SqlException e) { throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e); } } private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime, Set<String> hostnames, SqlExecutionContext context) throws SqlException { DateTimeFormatter formatter = 
DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC")); String from = formatter.format(startTime).substring(0, 19) + ".000000Z"; String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z"; String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');"; try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) { ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>(); try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { if (record == null || record.getStr(0) == null) { nullRecords.incrementAndGet(); continue; } String hostname = record.getStr(0).toString(); if (hostnames.isEmpty() || hostnames.contains(hostname)) { snapshots.put(hostname, new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000), new Load(record.getFloat(2), record.getFloat(3), record.getFloat(4)), record.getLong(5), record.getBool(6), record.getBool(7), record.getFloat(8))); } } } return snapshots; } } private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException { String sql = "select * from " + clusterTable.name; var context = newContext(); try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) { List<ClusterMetricSnapshot> snapshots = new ArrayList<>(); try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { String applicationIdString = record.getStr(0).toString(); if ( ! 
application.serializedForm().equals(applicationIdString)) continue; String clusterId = record.getStr(1).toString(); if (cluster.value().equals(clusterId)) { snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000), record.getFloat(3), record.getFloat(4))); } } } return new ClusterTimeseries(cluster, snapshots); } } /** Issues an SQL statement against the QuestDb engine */ private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException { return sqlCompiler.get().compile(sql, context); } private SqlExecutionContext newContext() { return new SqlExecutionContextImpl(engine.get(), 1); } /** A questDb table */ private class Table { private final Object writeLock = new Object(); private final String name; private final Clock clock; private final File dir; private long highestTimestampAdded = 0; Table(String dataDir, String name, Clock clock) { this.name = name; this.clock = clock; this.dir = new File(dataDir, name); IOUtils.createDirectory(dir.getPath()); new File(dir + "/_txn_scoreboard").delete(); } boolean exists() { return 0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), name); } TableWriter getWriter() { return engine.get().getWriter(newContext().getCairoSecurityContext(), name); } void gc() { synchronized (writeLock) { Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4)); SqlExecutionContext context = newContext(); int partitions = 0; try { List<String> removeList = new ArrayList<>(); for (String dirEntry : dir.list()) { File partitionDir = new File(dir, dirEntry); if (!partitionDir.isDirectory()) continue; partitions++; DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC")); Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00")); if (partitionDay.isBefore(oldestToKeep)) removeList.add(dirEntry); } if (removeList.size() < partitions && !removeList.isEmpty()) { issue("alter table " + name + " 
drop partition list " + removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")), context); } } catch (SqlException e) { log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e); } } } /** * Repairs this db on corruption. * * @param e the exception indicating corruption */ private void repair(Exception e) { log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e); IOUtils.recursiveDeleteDir(dir); IOUtils.createDirectory(dir.getPath()); ensureTablesExist(); } void ensureColumnExists(String column, String columnType) throws SqlException { if (columnNames().contains(column)) return; issue("alter table " + name + " add column " + column + " " + columnType, newContext()); } private Optional<Long> adjustOrDiscard(Instant at) { long timestamp = at.toEpochMilli(); if (timestamp >= highestTimestampAdded) { highestTimestampAdded = timestamp; return Optional.of(timestamp); } if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded); return Optional.empty(); } private List<String> columnNames() throws SqlException { var context = newContext(); List<String> columns = new ArrayList<>(); try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) { try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { columns.add(record.getStr(0).toString()); } } } return columns; } } }
class QuestMetricsDb extends AbstractComponent implements MetricsDb { private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName()); private final Table nodeTable; private final Table clusterTable; private final Clock clock; private final String dataDir; private final CairoEngine engine; private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool; private final AtomicBoolean closed = new AtomicBoolean(false); @Inject public QuestMetricsDb() { this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC()); } public QuestMetricsDb(String dataDir, Clock clock) { this.clock = clock; if (dataDir.startsWith(Defaults.getDefaults().vespaHome()) && ! new File(Defaults.getDefaults().vespaHome()).exists()) dataDir = "data"; String logConfig = dataDir + "/quest-log.conf"; IOUtils.createDirectory(logConfig); IOUtils.writeFile(new File(logConfig), new byte[0]); System.setProperty("out", logConfig); this.dataDir = dataDir; engine = new CairoEngine(new DefaultCairoConfiguration(dataDir)); sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine())); nodeTable = new Table(dataDir, "metrics", clock); clusterTable = new Table(dataDir, "clusterMetrics", clock); ensureTablesExist(); } private CairoEngine engine() { if (closed.get()) throw new IllegalStateException("Attempted to access QuestDb after calling close"); return engine; } @Override public Clock clock() { return clock; } @Override public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) { try { addNodeMetricsBody(snapshots); } catch (CairoException e) { if (e.getMessage().contains("Cannot read offset")) { nodeTable.repair(e); addNodeMetricsBody(snapshots); } } } private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) { synchronized (nodeTable.writeLock) { try (TableWriter writer = nodeTable.getWriter()) { for (var snapshot : snapshots) { Optional<Long> atMillis = 
nodeTable.adjustOrDiscard(snapshot.getSecond().at()); if (atMillis.isEmpty()) continue; TableWriter.Row row = writer.newRow(atMillis.get() * 1000); row.putStr(0, snapshot.getFirst()); row.putFloat(2, (float) snapshot.getSecond().load().cpu()); row.putFloat(3, (float) snapshot.getSecond().load().memory()); row.putFloat(4, (float) snapshot.getSecond().load().disk()); row.putLong(5, snapshot.getSecond().generation()); row.putBool(6, snapshot.getSecond().inService()); row.putBool(7, snapshot.getSecond().stable()); row.putFloat(8, (float) snapshot.getSecond().queryRate()); row.append(); } writer.commit(); } } } @Override public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) { try { addClusterMetricsBody(application, snapshots); } catch (CairoException e) { if (e.getMessage().contains("Cannot read offset")) { clusterTable.repair(e); addClusterMetricsBody(application, snapshots); } } } private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) { synchronized (clusterTable.writeLock) { try (TableWriter writer = clusterTable.getWriter()) { for (var snapshot : snapshots.entrySet()) { Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at()); if (atMillis.isEmpty()) continue; TableWriter.Row row = writer.newRow(atMillis.get() * 1000); row.putStr(0, applicationId.serializedForm()); row.putStr(1, snapshot.getKey().value()); row.putFloat(3, (float) snapshot.getValue().queryRate()); row.putFloat(4, (float) snapshot.getValue().writeRate()); row.append(); } writer.commit(); } } } @Override public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) { try { var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext()); return snapshots.entrySet().stream() .map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue())) .collect(Collectors.toList()); } catch (SqlException e) { throw new 
IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e); } } @Override public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) { try { return getClusterSnapshots(applicationId, clusterId); } catch (SqlException e) { throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e); } } @Override public void gc() { nodeTable.gc(); clusterTable.gc(); } @Override public void deconstruct() { close(); } @Override private void ensureTablesExist() { if (nodeTable.exists()) ensureNodeTableIsUpdated(); else createNodeTable(); if (clusterTable.exists()) ensureClusterTableIsUpdated(); else createClusterTable(); } private void ensureNodeTableIsUpdated() { try { } catch (Exception e) { nodeTable.repair(e); } } private void ensureClusterTableIsUpdated() { try { if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) { } } catch (Exception e) { clusterTable.repair(e); } } private void createNodeTable() { try { issue("create table " + nodeTable.name + " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," + " application_generation long, inService boolean, stable boolean, queries_rate float)" + " timestamp(at)" + "PARTITION BY DAY;", newContext()); } catch (SqlException e) { throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e); } } private void createClusterTable() { try { issue("create table " + clusterTable.name + " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" + " timestamp(at)" + "PARTITION BY DAY;", newContext()); } catch (SqlException e) { throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e); } } private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime, Set<String> hostnames, SqlExecutionContext context) throws SqlException { 
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC")); String from = formatter.format(startTime).substring(0, 19) + ".000000Z"; String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z"; String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');"; try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) { ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>(); try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { String hostname = record.getStr(0).toString(); if (hostnames.isEmpty() || hostnames.contains(hostname)) { snapshots.put(hostname, new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000), new Load(record.getFloat(2), record.getFloat(3), record.getFloat(4)), record.getLong(5), record.getBool(6), record.getBool(7), record.getFloat(8))); } } } return snapshots; } } private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException { String sql = "select * from " + clusterTable.name; var context = newContext(); try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) { List<ClusterMetricSnapshot> snapshots = new ArrayList<>(); try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { String applicationIdString = record.getStr(0).toString(); if ( ! 
application.serializedForm().equals(applicationIdString)) continue; String clusterId = record.getStr(1).toString(); if (cluster.value().equals(clusterId)) { snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000), record.getFloat(3), record.getFloat(4))); } } } return new ClusterTimeseries(cluster, snapshots); } } /** Issues an SQL statement against the QuestDb engine */ private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException { SqlCompiler sqlCompiler = sqlCompilerPool.alloc(); try { return sqlCompiler.compile(sql, context); } finally { sqlCompilerPool.free(sqlCompiler); } } private SqlExecutionContext newContext() { return new SqlExecutionContextImpl(engine(), 1); } /** A questDb table */ private class Table { private final Object writeLock = new Object(); private final String name; private final Clock clock; private final File dir; private long highestTimestampAdded = 0; Table(String dataDir, String name, Clock clock) { this.name = name; this.clock = clock; this.dir = new File(dataDir, name); IOUtils.createDirectory(dir.getPath()); new File(dir + "/_txn_scoreboard").delete(); } boolean exists() { return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name); } TableWriter getWriter() { return engine().getWriter(newContext().getCairoSecurityContext(), name); } void gc() { synchronized (writeLock) { Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4)); SqlExecutionContext context = newContext(); int partitions = 0; try { List<String> removeList = new ArrayList<>(); for (String dirEntry : dir.list()) { File partitionDir = new File(dir, dirEntry); if (!partitionDir.isDirectory()) continue; partitions++; DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC")); Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00")); if (partitionDay.isBefore(oldestToKeep)) removeList.add(dirEntry); } if 
(removeList.size() < partitions && !removeList.isEmpty()) { issue("alter table " + name + " drop partition list " + removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")), context); } } catch (SqlException e) { log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e); } } } /** * Repairs this db on corruption. * * @param e the exception indicating corruption */ private void repair(Exception e) { log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e); IOUtils.recursiveDeleteDir(dir); IOUtils.createDirectory(dir.getPath()); ensureTablesExist(); } void ensureColumnExists(String column, String columnType) throws SqlException { if (columnNames().contains(column)) return; issue("alter table " + name + " add column " + column + " " + columnType, newContext()); } private Optional<Long> adjustOrDiscard(Instant at) { long timestamp = at.toEpochMilli(); if (timestamp >= highestTimestampAdded) { highestTimestampAdded = timestamp; return Optional.of(timestamp); } if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded); return Optional.empty(); } private List<String> columnNames() throws SqlException { var context = newContext(); List<String> columns = new ArrayList<>(); try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) { try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { columns.add(record.getStr(0).toString()); } } } return columns; } } }
Betyr dette at default action er retirement?
private HostAction nextAction(Node node, VespaChangeRequest changeRequest, boolean spareCapacity) { var hostAction = getPreviousAction(node, changeRequest) .orElse(new HostAction(node.hostname().value(), State.PENDING_RETIREMENT, Instant.now())); if (node.type() != NodeType.host || !spareCapacity) { return hostAction.withState(State.REQUIRES_OPERATOR_ACTION); } if (changeRequest.getChangeRequestSource().isClosed()) { recycleNode(changeRequest.getZoneId(), node); return hostAction.withState(State.COMPLETE); } if (shouldRetire(changeRequest, hostAction)) { if (node.state() != Node.State.active) return hostAction.withState(State.RETIRED); if (!node.wantToRetire()) { logger.info(String.format("Retiring %s due to %s", node.hostname().value(), changeRequest.getChangeRequestSource().getId())); setWantToRetire(changeRequest.getZoneId(), node, true); } return hostAction.withState(State.RETIRING); } return hostAction; }
.orElse(new HostAction(node.hostname().value(), State.PENDING_RETIREMENT, Instant.now()));
private HostAction nextAction(Node node, VespaChangeRequest changeRequest, boolean spareCapacity) { var hostAction = getPreviousAction(node, changeRequest) .orElse(new HostAction(node.hostname().value(), State.NONE, Instant.now())); if (changeRequest.getChangeRequestSource().isClosed()) { recycleNode(changeRequest.getZoneId(), node, hostAction); return hostAction.withState(State.COMPLETE); } if (node.type() != NodeType.host || !spareCapacity) { return hostAction.withState(State.REQUIRES_OPERATOR_ACTION); } if (shouldRetire(changeRequest, hostAction)) { if (!node.wantToRetire()) { logger.info(String.format("Retiring %s due to %s", node.hostname().value(), changeRequest.getChangeRequestSource().getId())); setWantToRetire(changeRequest.getZoneId(), node, true); } return hostAction.withState(State.RETIRING); } if (hasRetired(node, hostAction)) { return hostAction.withState(State.RETIRED); } if (pendingRetirement(node)) { return hostAction.withState(State.PENDING_RETIREMENT); } return hostAction; }
class VCMRMaintainer extends ControllerMaintainer { private final Logger logger = Logger.getLogger(VCMRMaintainer.class.getName()); private final CuratorDb curator; private final NodeRepository nodeRepository; public VCMRMaintainer(Controller controller, Duration interval) { super(controller, interval, null, EnumSet.of(SystemName.main)); this.curator = controller.curator(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override protected boolean maintain() { var changeRequests = curator.readChangeRequests() .stream() .filter(shouldUpdate()) .collect(Collectors.toList()); var nodesByZone = nodesByZone(); changeRequests.forEach(changeRequest -> { var nodes = impactedNodes(nodesByZone, changeRequest); var nextActions = getNextActions(nodes, changeRequest); var status = getStatus(nextActions, changeRequest); try (var lock = curator.lockChangeRequests()) { curator.writeChangeRequest( changeRequest .withActionPlan(nextActions) .withStatus(status)); } }); return true; } /** * Status is based on: * 1. Whether the source has reportedly closed the request * 2. Whether any host requires operator action * 3. 
Whether any host has started/finished retiring */ private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) { if (changeRequest.getChangeRequestSource().isClosed()) { return Status.COMPLETED; } var byActionState = nextActions.stream().collect(Collectors.groupingBy(HostAction::getState, Collectors.counting())); if (byActionState.getOrDefault(State.REQUIRES_OPERATOR_ACTION, 0L) > 0) { return Status.REQUIRES_OPERATOR_ACTION; } if (byActionState.getOrDefault(State.RETIRING, 0L) + byActionState.getOrDefault(State.RETIRED, 0L) > 0) { return Status.IN_PROGRESS; } return Status.PENDING_ACTION; } private List<HostAction> getNextActions(List<Node> nodes, VespaChangeRequest changeRequest) { var spareCapacity = hasSpareCapacity(changeRequest.getZoneId(), nodes); return nodes.stream() .map(node -> nextAction(node, changeRequest, spareCapacity)) .collect(Collectors.toList()); } private List<Node> impactedNodes(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) { return nodesByZone.get(changeRequest.getZoneId()) .stream() .filter(isImpacted(changeRequest)) .collect(Collectors.toList()); } private Optional<HostAction> getPreviousAction(Node node, VespaChangeRequest changeRequest) { return changeRequest.getHostActionPlan() .stream() .filter(hostAction -> hostAction.getHostname().equals(node.hostname().value())) .findFirst(); } private void recycleNode(ZoneId zoneId, Node node) { if (node.state() == Node.State.parked) { logger.info("Setting " + node.hostname() + " to dirty"); nodeRepository.setState(zoneId, NodeState.dirty, node.hostname().value()); } if (node.wantToRetire()) setWantToRetire(zoneId, node, false); } private boolean shouldRetire(VespaChangeRequest changeRequest, HostAction action) { return action.getState() == State.PENDING_RETIREMENT && changeRequest.getChangeRequestSource().getPlannedStartTime() .minus(Duration.ofDays(2)) .isBefore(ZonedDateTime.now()); } private Map<ZoneId, List<Node>> nodesByZone() { return 
controller().zoneRegistry() .zones() .reachable() .in(Environment.prod) .ids() .stream() .collect(Collectors.toMap( zone -> zone, zone -> nodeRepository.list(zone, false) )); } private Predicate<Node> isImpacted(VespaChangeRequest changeRequest) { return node -> changeRequest.getImpactedHosts().contains(node.hostname().value()) || node.switchHostname() .map(switchHostname -> changeRequest.getImpactedSwitches().contains(switchHostname)) .orElse(false); } private Predicate<VespaChangeRequest> shouldUpdate() { return changeRequest -> changeRequest.getStatus() != Status.COMPLETED && List.of(ChangeRequest.Impact.HIGH, ChangeRequest.Impact.VERY_HIGH).contains(changeRequest.getImpact()); } private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) { var tenantHosts = nodes.stream() .filter(node -> node.type() == NodeType.host) .map(Node::hostname) .collect(Collectors.toList()); return tenantHosts.isEmpty() || nodeRepository.isReplaceable(zoneId, tenantHosts); } private void setWantToRetire(ZoneId zoneId, Node node, boolean wantToRetire) { var newNode = new NodeRepositoryNode(); newNode.setWantToRetire(wantToRetire); nodeRepository.patchNode(zoneId, node.hostname().value(), newNode); } }
class VCMRMaintainer extends ControllerMaintainer { private final Logger logger = Logger.getLogger(VCMRMaintainer.class.getName()); private final Duration ALLOWED_RETIREMENT_TIME = Duration.ofHours(60); private final CuratorDb curator; private final NodeRepository nodeRepository; public VCMRMaintainer(Controller controller, Duration interval) { super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic))); this.curator = controller.curator(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override protected boolean maintain() { var changeRequests = curator.readChangeRequests() .stream() .filter(shouldUpdate()) .collect(Collectors.toList()); var nodesByZone = nodesByZone(); changeRequests.forEach(changeRequest -> { var nodes = impactedNodes(nodesByZone, changeRequest); var nextActions = getNextActions(nodes, changeRequest); var status = getStatus(nextActions, changeRequest); try (var lock = curator.lockChangeRequests()) { curator.writeChangeRequest( changeRequest .withActionPlan(nextActions) .withStatus(status)); } }); return true; } /** * Status is based on: * 1. Whether the source has reportedly closed the request * 2. Whether any host requires operator action * 3. 
Whether any host has started/finished retiring */ private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) { if (changeRequest.getChangeRequestSource().isClosed()) { return Status.COMPLETED; } var byActionState = nextActions.stream().collect(Collectors.groupingBy(HostAction::getState, Collectors.counting())); if (byActionState.getOrDefault(State.REQUIRES_OPERATOR_ACTION, 0L) > 0) { return Status.REQUIRES_OPERATOR_ACTION; } if (byActionState.getOrDefault(State.RETIRING, 0L) + byActionState.getOrDefault(State.RETIRED, 0L) > 0) { return Status.IN_PROGRESS; } if (byActionState.getOrDefault(State.PENDING_RETIREMENT, 0L) > 0) { return Status.PENDING_ACTION; } return Status.NOOP; } private List<HostAction> getNextActions(List<Node> nodes, VespaChangeRequest changeRequest) { var spareCapacity = hasSpareCapacity(changeRequest.getZoneId(), nodes); return nodes.stream() .map(node -> nextAction(node, changeRequest, spareCapacity)) .collect(Collectors.toList()); } private List<Node> impactedNodes(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) { return nodesByZone.get(changeRequest.getZoneId()) .stream() .filter(isImpacted(changeRequest)) .collect(Collectors.toList()); } private Optional<HostAction> getPreviousAction(Node node, VespaChangeRequest changeRequest) { return changeRequest.getHostActionPlan() .stream() .filter(hostAction -> hostAction.getHostname().equals(node.hostname().value())) .findFirst(); } private void recycleNode(ZoneId zoneId, Node node, HostAction hostAction) { if (hostAction.getState() == State.RETIRED && node.state() == Node.State.parked) { logger.info("Setting " + node.hostname() + " to dirty"); nodeRepository.setState(zoneId, NodeState.dirty, node.hostname().value()); } if (hostAction.getState() == State.RETIRING && node.wantToRetire()) setWantToRetire(zoneId, node, false); } private boolean shouldRetire(VespaChangeRequest changeRequest, HostAction action) { return action.getState() == 
State.PENDING_RETIREMENT && changeRequest.getChangeRequestSource().getPlannedStartTime() .minus(ALLOWED_RETIREMENT_TIME) .isBefore(ZonedDateTime.now()); } private boolean hasRetired(Node node, HostAction hostAction) { return hostAction.getState() == State.RETIRING && node.state() == Node.State.parked; } /** * TODO: For now, we choose to retire any active host */ private boolean pendingRetirement(Node node) { return node.state() == Node.State.active; } private Map<ZoneId, List<Node>> nodesByZone() { return controller().zoneRegistry() .zones() .reachable() .in(Environment.prod) .ids() .stream() .collect(Collectors.toMap( zone -> zone, zone -> nodeRepository.list(zone, false) )); } private Predicate<Node> isImpacted(VespaChangeRequest changeRequest) { return node -> changeRequest.getImpactedHosts().contains(node.hostname().value()) || node.switchHostname() .map(switchHostname -> changeRequest.getImpactedSwitches().contains(switchHostname)) .orElse(false); } private Predicate<VespaChangeRequest> shouldUpdate() { return changeRequest -> changeRequest.getStatus() != Status.COMPLETED && List.of(Impact.HIGH, Impact.VERY_HIGH) .contains(changeRequest.getImpact()); } private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) { var tenantHosts = nodes.stream() .filter(node -> node.type() == NodeType.host) .map(Node::hostname) .collect(Collectors.toList()); return tenantHosts.isEmpty() || nodeRepository.isReplaceable(zoneId, tenantHosts); } private void setWantToRetire(ZoneId zoneId, Node node, boolean wantToRetire) { var newNode = new NodeRepositoryNode(); newNode.setWantToRetire(wantToRetire); nodeRepository.patchNode(zoneId, node.hostname().value(), newNode); } }
❤️
private void recycleNode(ZoneId zoneId, Node node, HostAction hostAction) { if (hostAction.getState() == State.RETIRED && node.state() == Node.State.parked) { logger.info("Setting " + node.hostname() + " to dirty"); nodeRepository.setState(zoneId, NodeState.dirty, node.hostname().value()); } if (hostAction.getState() == State.RETIRING && node.wantToRetire()) setWantToRetire(zoneId, node, false); }
setWantToRetire(zoneId, node, false);
private void recycleNode(ZoneId zoneId, Node node, HostAction hostAction) { if (hostAction.getState() == State.RETIRED && node.state() == Node.State.parked) { logger.info("Setting " + node.hostname() + " to dirty"); nodeRepository.setState(zoneId, NodeState.dirty, node.hostname().value()); } if (hostAction.getState() == State.RETIRING && node.wantToRetire()) setWantToRetire(zoneId, node, false); }
class VCMRMaintainer extends ControllerMaintainer { private final Logger logger = Logger.getLogger(VCMRMaintainer.class.getName()); private final CuratorDb curator; private final NodeRepository nodeRepository; public VCMRMaintainer(Controller controller, Duration interval) { super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic))); this.curator = controller.curator(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override protected boolean maintain() { var changeRequests = curator.readChangeRequests() .stream() .filter(shouldUpdate()) .collect(Collectors.toList()); var nodesByZone = nodesByZone(); changeRequests.forEach(changeRequest -> { var nodes = impactedNodes(nodesByZone, changeRequest); var nextActions = getNextActions(nodes, changeRequest); var status = getStatus(nextActions, changeRequest); try (var lock = curator.lockChangeRequests()) { curator.writeChangeRequest( changeRequest .withActionPlan(nextActions) .withStatus(status)); } }); return true; } /** * Status is based on: * 1. Whether the source has reportedly closed the request * 2. Whether any host requires operator action * 3. 
Whether any host has started/finished retiring */ private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) { if (changeRequest.getChangeRequestSource().isClosed()) { return Status.COMPLETED; } var byActionState = nextActions.stream().collect(Collectors.groupingBy(HostAction::getState, Collectors.counting())); if (byActionState.getOrDefault(State.REQUIRES_OPERATOR_ACTION, 0L) > 0) { return Status.REQUIRES_OPERATOR_ACTION; } if (byActionState.getOrDefault(State.RETIRING, 0L) + byActionState.getOrDefault(State.RETIRED, 0L) > 0) { return Status.IN_PROGRESS; } return Status.PENDING_ACTION; } private List<HostAction> getNextActions(List<Node> nodes, VespaChangeRequest changeRequest) { var spareCapacity = hasSpareCapacity(changeRequest.getZoneId(), nodes); return nodes.stream() .map(node -> nextAction(node, changeRequest, spareCapacity)) .collect(Collectors.toList()); } private List<Node> impactedNodes(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) { return nodesByZone.get(changeRequest.getZoneId()) .stream() .filter(isImpacted(changeRequest)) .collect(Collectors.toList()); } private Optional<HostAction> getPreviousAction(Node node, VespaChangeRequest changeRequest) { return changeRequest.getHostActionPlan() .stream() .filter(hostAction -> hostAction.getHostname().equals(node.hostname().value())) .findFirst(); } private HostAction nextAction(Node node, VespaChangeRequest changeRequest, boolean spareCapacity) { var hostAction = getPreviousAction(node, changeRequest) .orElse(new HostAction(node.hostname().value(), State.NONE, Instant.now())); if (changeRequest.getChangeRequestSource().isClosed()) { recycleNode(changeRequest.getZoneId(), node, hostAction); return hostAction.withState(State.COMPLETE); } if (node.type() != NodeType.host || !spareCapacity) { return hostAction.withState(State.REQUIRES_OPERATOR_ACTION); } if (shouldRetire(changeRequest, hostAction)) { if (!node.wantToRetire()) { 
logger.info(String.format("Retiring %s due to %s", node.hostname().value(), changeRequest.getChangeRequestSource().getId())); setWantToRetire(changeRequest.getZoneId(), node, true); } return hostAction.withState(State.RETIRING); } if (hasRetired(node, hostAction)) { return hostAction.withState(State.RETIRED); } if (pendingRetirement(node)) { return hostAction.withState(State.PENDING_RETIREMENT); } return hostAction; } private boolean shouldRetire(VespaChangeRequest changeRequest, HostAction action) { return action.getState() == State.PENDING_RETIREMENT && changeRequest.getChangeRequestSource().getPlannedStartTime() .minus(Duration.ofDays(2)) .isBefore(ZonedDateTime.now()); } private boolean hasRetired(Node node, HostAction hostAction) { return hostAction.getState() == State.RETIRING && node.state() == Node.State.parked; } /** * TODO: For now, we choose to retire any active host */ private boolean pendingRetirement(Node node) { return node.state() == Node.State.active; } private Map<ZoneId, List<Node>> nodesByZone() { return controller().zoneRegistry() .zones() .reachable() .in(Environment.prod) .ids() .stream() .collect(Collectors.toMap( zone -> zone, zone -> nodeRepository.list(zone, false) )); } private Predicate<Node> isImpacted(VespaChangeRequest changeRequest) { return node -> changeRequest.getImpactedHosts().contains(node.hostname().value()) || node.switchHostname() .map(switchHostname -> changeRequest.getImpactedSwitches().contains(switchHostname)) .orElse(false); } private Predicate<VespaChangeRequest> shouldUpdate() { return changeRequest -> changeRequest.getStatus() != Status.COMPLETED && List.of(Impact.HIGH, Impact.VERY_HIGH) .contains(changeRequest.getImpact()); } private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) { var tenantHosts = nodes.stream() .filter(node -> node.type() == NodeType.host) .map(Node::hostname) .collect(Collectors.toList()); return tenantHosts.isEmpty() || nodeRepository.isReplaceable(zoneId, tenantHosts); } private 
void setWantToRetire(ZoneId zoneId, Node node, boolean wantToRetire) { var newNode = new NodeRepositoryNode(); newNode.setWantToRetire(wantToRetire); nodeRepository.patchNode(zoneId, node.hostname().value(), newNode); } }
class VCMRMaintainer extends ControllerMaintainer { private final Logger logger = Logger.getLogger(VCMRMaintainer.class.getName()); private final Duration ALLOWED_RETIREMENT_TIME = Duration.ofHours(60); private final CuratorDb curator; private final NodeRepository nodeRepository; public VCMRMaintainer(Controller controller, Duration interval) { super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic))); this.curator = controller.curator(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override protected boolean maintain() { var changeRequests = curator.readChangeRequests() .stream() .filter(shouldUpdate()) .collect(Collectors.toList()); var nodesByZone = nodesByZone(); changeRequests.forEach(changeRequest -> { var nodes = impactedNodes(nodesByZone, changeRequest); var nextActions = getNextActions(nodes, changeRequest); var status = getStatus(nextActions, changeRequest); try (var lock = curator.lockChangeRequests()) { curator.writeChangeRequest( changeRequest .withActionPlan(nextActions) .withStatus(status)); } }); return true; } /** * Status is based on: * 1. Whether the source has reportedly closed the request * 2. Whether any host requires operator action * 3. 
Whether any host has started/finished retiring */ private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) { if (changeRequest.getChangeRequestSource().isClosed()) { return Status.COMPLETED; } var byActionState = nextActions.stream().collect(Collectors.groupingBy(HostAction::getState, Collectors.counting())); if (byActionState.getOrDefault(State.REQUIRES_OPERATOR_ACTION, 0L) > 0) { return Status.REQUIRES_OPERATOR_ACTION; } if (byActionState.getOrDefault(State.RETIRING, 0L) + byActionState.getOrDefault(State.RETIRED, 0L) > 0) { return Status.IN_PROGRESS; } if (byActionState.getOrDefault(State.PENDING_RETIREMENT, 0L) > 0) { return Status.PENDING_ACTION; } return Status.NOOP; } private List<HostAction> getNextActions(List<Node> nodes, VespaChangeRequest changeRequest) { var spareCapacity = hasSpareCapacity(changeRequest.getZoneId(), nodes); return nodes.stream() .map(node -> nextAction(node, changeRequest, spareCapacity)) .collect(Collectors.toList()); } private List<Node> impactedNodes(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) { return nodesByZone.get(changeRequest.getZoneId()) .stream() .filter(isImpacted(changeRequest)) .collect(Collectors.toList()); } private Optional<HostAction> getPreviousAction(Node node, VespaChangeRequest changeRequest) { return changeRequest.getHostActionPlan() .stream() .filter(hostAction -> hostAction.getHostname().equals(node.hostname().value())) .findFirst(); } private HostAction nextAction(Node node, VespaChangeRequest changeRequest, boolean spareCapacity) { var hostAction = getPreviousAction(node, changeRequest) .orElse(new HostAction(node.hostname().value(), State.NONE, Instant.now())); if (changeRequest.getChangeRequestSource().isClosed()) { recycleNode(changeRequest.getZoneId(), node, hostAction); return hostAction.withState(State.COMPLETE); } if (node.type() != NodeType.host || !spareCapacity) { return hostAction.withState(State.REQUIRES_OPERATOR_ACTION); } if 
(shouldRetire(changeRequest, hostAction)) { if (!node.wantToRetire()) { logger.info(String.format("Retiring %s due to %s", node.hostname().value(), changeRequest.getChangeRequestSource().getId())); setWantToRetire(changeRequest.getZoneId(), node, true); } return hostAction.withState(State.RETIRING); } if (hasRetired(node, hostAction)) { return hostAction.withState(State.RETIRED); } if (pendingRetirement(node)) { return hostAction.withState(State.PENDING_RETIREMENT); } return hostAction; } private boolean shouldRetire(VespaChangeRequest changeRequest, HostAction action) { return action.getState() == State.PENDING_RETIREMENT && changeRequest.getChangeRequestSource().getPlannedStartTime() .minus(ALLOWED_RETIREMENT_TIME) .isBefore(ZonedDateTime.now()); } private boolean hasRetired(Node node, HostAction hostAction) { return hostAction.getState() == State.RETIRING && node.state() == Node.State.parked; } /** * TODO: For now, we choose to retire any active host */ private boolean pendingRetirement(Node node) { return node.state() == Node.State.active; } private Map<ZoneId, List<Node>> nodesByZone() { return controller().zoneRegistry() .zones() .reachable() .in(Environment.prod) .ids() .stream() .collect(Collectors.toMap( zone -> zone, zone -> nodeRepository.list(zone, false) )); } private Predicate<Node> isImpacted(VespaChangeRequest changeRequest) { return node -> changeRequest.getImpactedHosts().contains(node.hostname().value()) || node.switchHostname() .map(switchHostname -> changeRequest.getImpactedSwitches().contains(switchHostname)) .orElse(false); } private Predicate<VespaChangeRequest> shouldUpdate() { return changeRequest -> changeRequest.getStatus() != Status.COMPLETED && List.of(Impact.HIGH, Impact.VERY_HIGH) .contains(changeRequest.getImpact()); } private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) { var tenantHosts = nodes.stream() .filter(node -> node.type() == NodeType.host) .map(Node::hostname) .collect(Collectors.toList()); return 
tenantHosts.isEmpty() || nodeRepository.isReplaceable(zoneId, tenantHosts); } private void setWantToRetire(ZoneId zoneId, Node node, boolean wantToRetire) { var newNode = new NodeRepositoryNode(); newNode.setWantToRetire(wantToRetire); nodeRepository.patchNode(zoneId, node.hostname().value(), newNode); } }
This looks correct, but is it used in one of the tests?
private boolean shouldRetire(VespaChangeRequest changeRequest, HostAction action) { return action.getState() == State.PENDING_RETIREMENT && changeRequest.getChangeRequestSource().getPlannedStartTime() .minus(Duration.ofDays(2)) .isBefore(ZonedDateTime.now()); }
.minus(Duration.ofDays(2))
private boolean shouldRetire(VespaChangeRequest changeRequest, HostAction action) { return action.getState() == State.PENDING_RETIREMENT && changeRequest.getChangeRequestSource().getPlannedStartTime() .minus(ALLOWED_RETIREMENT_TIME) .isBefore(ZonedDateTime.now()); }
class VCMRMaintainer extends ControllerMaintainer { private final Logger logger = Logger.getLogger(VCMRMaintainer.class.getName()); private final CuratorDb curator; private final NodeRepository nodeRepository; public VCMRMaintainer(Controller controller, Duration interval) { super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic))); this.curator = controller.curator(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override protected boolean maintain() { var changeRequests = curator.readChangeRequests() .stream() .filter(shouldUpdate()) .collect(Collectors.toList()); var nodesByZone = nodesByZone(); changeRequests.forEach(changeRequest -> { var nodes = impactedNodes(nodesByZone, changeRequest); var nextActions = getNextActions(nodes, changeRequest); var status = getStatus(nextActions, changeRequest); try (var lock = curator.lockChangeRequests()) { curator.writeChangeRequest( changeRequest .withActionPlan(nextActions) .withStatus(status)); } }); return true; } /** * Status is based on: * 1. Whether the source has reportedly closed the request * 2. Whether any host requires operator action * 3. 
Whether any host has started/finished retiring */ private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) { if (changeRequest.getChangeRequestSource().isClosed()) { return Status.COMPLETED; } var byActionState = nextActions.stream().collect(Collectors.groupingBy(HostAction::getState, Collectors.counting())); if (byActionState.getOrDefault(State.REQUIRES_OPERATOR_ACTION, 0L) > 0) { return Status.REQUIRES_OPERATOR_ACTION; } if (byActionState.getOrDefault(State.RETIRING, 0L) + byActionState.getOrDefault(State.RETIRED, 0L) > 0) { return Status.IN_PROGRESS; } return Status.PENDING_ACTION; } private List<HostAction> getNextActions(List<Node> nodes, VespaChangeRequest changeRequest) { var spareCapacity = hasSpareCapacity(changeRequest.getZoneId(), nodes); return nodes.stream() .map(node -> nextAction(node, changeRequest, spareCapacity)) .collect(Collectors.toList()); } private List<Node> impactedNodes(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) { return nodesByZone.get(changeRequest.getZoneId()) .stream() .filter(isImpacted(changeRequest)) .collect(Collectors.toList()); } private Optional<HostAction> getPreviousAction(Node node, VespaChangeRequest changeRequest) { return changeRequest.getHostActionPlan() .stream() .filter(hostAction -> hostAction.getHostname().equals(node.hostname().value())) .findFirst(); } private HostAction nextAction(Node node, VespaChangeRequest changeRequest, boolean spareCapacity) { var hostAction = getPreviousAction(node, changeRequest) .orElse(new HostAction(node.hostname().value(), State.NONE, Instant.now())); if (changeRequest.getChangeRequestSource().isClosed()) { recycleNode(changeRequest.getZoneId(), node, hostAction); return hostAction.withState(State.COMPLETE); } if (node.type() != NodeType.host || !spareCapacity) { return hostAction.withState(State.REQUIRES_OPERATOR_ACTION); } if (shouldRetire(changeRequest, hostAction)) { if (!node.wantToRetire()) { 
logger.info(String.format("Retiring %s due to %s", node.hostname().value(), changeRequest.getChangeRequestSource().getId())); setWantToRetire(changeRequest.getZoneId(), node, true); } return hostAction.withState(State.RETIRING); } if (hasRetired(node, hostAction)) { return hostAction.withState(State.RETIRED); } if (pendingRetirement(node)) { return hostAction.withState(State.PENDING_RETIREMENT); } return hostAction; } private void recycleNode(ZoneId zoneId, Node node, HostAction hostAction) { if (hostAction.getState() == State.RETIRED && node.state() == Node.State.parked) { logger.info("Setting " + node.hostname() + " to dirty"); nodeRepository.setState(zoneId, NodeState.dirty, node.hostname().value()); } if (hostAction.getState() == State.RETIRING && node.wantToRetire()) setWantToRetire(zoneId, node, false); } private boolean hasRetired(Node node, HostAction hostAction) { return hostAction.getState() == State.RETIRING && node.state() == Node.State.parked; } /** * TODO: For now, we choose to retire any active host */ private boolean pendingRetirement(Node node) { return node.state() == Node.State.active; } private Map<ZoneId, List<Node>> nodesByZone() { return controller().zoneRegistry() .zones() .reachable() .in(Environment.prod) .ids() .stream() .collect(Collectors.toMap( zone -> zone, zone -> nodeRepository.list(zone, false) )); } private Predicate<Node> isImpacted(VespaChangeRequest changeRequest) { return node -> changeRequest.getImpactedHosts().contains(node.hostname().value()) || node.switchHostname() .map(switchHostname -> changeRequest.getImpactedSwitches().contains(switchHostname)) .orElse(false); } private Predicate<VespaChangeRequest> shouldUpdate() { return changeRequest -> changeRequest.getStatus() != Status.COMPLETED && List.of(Impact.HIGH, Impact.VERY_HIGH) .contains(changeRequest.getImpact()); } private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) { var tenantHosts = nodes.stream() .filter(node -> node.type() == NodeType.host) 
.map(Node::hostname) .collect(Collectors.toList()); return tenantHosts.isEmpty() || nodeRepository.isReplaceable(zoneId, tenantHosts); } private void setWantToRetire(ZoneId zoneId, Node node, boolean wantToRetire) { var newNode = new NodeRepositoryNode(); newNode.setWantToRetire(wantToRetire); nodeRepository.patchNode(zoneId, node.hostname().value(), newNode); } }
class VCMRMaintainer extends ControllerMaintainer { private final Logger logger = Logger.getLogger(VCMRMaintainer.class.getName()); private final Duration ALLOWED_RETIREMENT_TIME = Duration.ofHours(60); private final CuratorDb curator; private final NodeRepository nodeRepository; public VCMRMaintainer(Controller controller, Duration interval) { super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic))); this.curator = controller.curator(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override protected boolean maintain() { var changeRequests = curator.readChangeRequests() .stream() .filter(shouldUpdate()) .collect(Collectors.toList()); var nodesByZone = nodesByZone(); changeRequests.forEach(changeRequest -> { var nodes = impactedNodes(nodesByZone, changeRequest); var nextActions = getNextActions(nodes, changeRequest); var status = getStatus(nextActions, changeRequest); try (var lock = curator.lockChangeRequests()) { curator.writeChangeRequest( changeRequest .withActionPlan(nextActions) .withStatus(status)); } }); return true; } /** * Status is based on: * 1. Whether the source has reportedly closed the request * 2. Whether any host requires operator action * 3. 
Whether any host has started/finished retiring */ private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) { if (changeRequest.getChangeRequestSource().isClosed()) { return Status.COMPLETED; } var byActionState = nextActions.stream().collect(Collectors.groupingBy(HostAction::getState, Collectors.counting())); if (byActionState.getOrDefault(State.REQUIRES_OPERATOR_ACTION, 0L) > 0) { return Status.REQUIRES_OPERATOR_ACTION; } if (byActionState.getOrDefault(State.RETIRING, 0L) + byActionState.getOrDefault(State.RETIRED, 0L) > 0) { return Status.IN_PROGRESS; } if (byActionState.getOrDefault(State.PENDING_RETIREMENT, 0L) > 0) { return Status.PENDING_ACTION; } return Status.NOOP; } private List<HostAction> getNextActions(List<Node> nodes, VespaChangeRequest changeRequest) { var spareCapacity = hasSpareCapacity(changeRequest.getZoneId(), nodes); return nodes.stream() .map(node -> nextAction(node, changeRequest, spareCapacity)) .collect(Collectors.toList()); } private List<Node> impactedNodes(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) { return nodesByZone.get(changeRequest.getZoneId()) .stream() .filter(isImpacted(changeRequest)) .collect(Collectors.toList()); } private Optional<HostAction> getPreviousAction(Node node, VespaChangeRequest changeRequest) { return changeRequest.getHostActionPlan() .stream() .filter(hostAction -> hostAction.getHostname().equals(node.hostname().value())) .findFirst(); } private HostAction nextAction(Node node, VespaChangeRequest changeRequest, boolean spareCapacity) { var hostAction = getPreviousAction(node, changeRequest) .orElse(new HostAction(node.hostname().value(), State.NONE, Instant.now())); if (changeRequest.getChangeRequestSource().isClosed()) { recycleNode(changeRequest.getZoneId(), node, hostAction); return hostAction.withState(State.COMPLETE); } if (node.type() != NodeType.host || !spareCapacity) { return hostAction.withState(State.REQUIRES_OPERATOR_ACTION); } if 
(shouldRetire(changeRequest, hostAction)) { if (!node.wantToRetire()) { logger.info(String.format("Retiring %s due to %s", node.hostname().value(), changeRequest.getChangeRequestSource().getId())); setWantToRetire(changeRequest.getZoneId(), node, true); } return hostAction.withState(State.RETIRING); } if (hasRetired(node, hostAction)) { return hostAction.withState(State.RETIRED); } if (pendingRetirement(node)) { return hostAction.withState(State.PENDING_RETIREMENT); } return hostAction; } private void recycleNode(ZoneId zoneId, Node node, HostAction hostAction) { if (hostAction.getState() == State.RETIRED && node.state() == Node.State.parked) { logger.info("Setting " + node.hostname() + " to dirty"); nodeRepository.setState(zoneId, NodeState.dirty, node.hostname().value()); } if (hostAction.getState() == State.RETIRING && node.wantToRetire()) setWantToRetire(zoneId, node, false); } private boolean hasRetired(Node node, HostAction hostAction) { return hostAction.getState() == State.RETIRING && node.state() == Node.State.parked; } /** * TODO: For now, we choose to retire any active host */ private boolean pendingRetirement(Node node) { return node.state() == Node.State.active; } private Map<ZoneId, List<Node>> nodesByZone() { return controller().zoneRegistry() .zones() .reachable() .in(Environment.prod) .ids() .stream() .collect(Collectors.toMap( zone -> zone, zone -> nodeRepository.list(zone, false) )); } private Predicate<Node> isImpacted(VespaChangeRequest changeRequest) { return node -> changeRequest.getImpactedHosts().contains(node.hostname().value()) || node.switchHostname() .map(switchHostname -> changeRequest.getImpactedSwitches().contains(switchHostname)) .orElse(false); } private Predicate<VespaChangeRequest> shouldUpdate() { return changeRequest -> changeRequest.getStatus() != Status.COMPLETED && List.of(Impact.HIGH, Impact.VERY_HIGH) .contains(changeRequest.getImpact()); } private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) { var tenantHosts 
= nodes.stream() .filter(node -> node.type() == NodeType.host) .map(Node::hostname) .collect(Collectors.toList()); return tenantHosts.isEmpty() || nodeRepository.isReplaceable(zoneId, tenantHosts); } private void setWantToRetire(ZoneId zoneId, Node node, boolean wantToRetire) { var newNode = new NodeRepositoryNode(); newNode.setWantToRetire(wantToRetire); nodeRepository.patchNode(zoneId, node.hostname().value(), newNode); } }
🤔
private List<ConnectionFactory> connectionFactoriesForHttps(Metric metric) { ConnectorConfig.ProxyProtocol proxyProtocolConfig = connectorConfig.proxyProtocol(); HttpConnectionFactory http1Factory = newHttp1ConnectionFactory(); if (connectorConfig.http2Enabled()) { HTTP2ServerConnectionFactory http2Factory = newHttp2ConnectionFactory(); ALPNServerConnectionFactory alpnFactory = newAlpnConnectionFactory(List.of(http1Factory, http2Factory), http1Factory); SslConnectionFactory sslFactory = newSslConnectionFactory(metric, alpnFactory); if (proxyProtocolConfig.enabled()) { if (proxyProtocolConfig.mixedMode()) { ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory); DetectorConnectionFactory detectorFactory = newDetectorConnectionFactory(sslFactory, proxyProtocolFactory); return List.of(detectorFactory, proxyProtocolFactory, sslFactory, alpnFactory, http1Factory, http2Factory); } else { ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory); return List.of(proxyProtocolFactory, sslFactory, alpnFactory, http1Factory, http2Factory); } } else { return List.of(sslFactory, alpnFactory, http1Factory, http2Factory); } } else { SslConnectionFactory sslFactory = newSslConnectionFactory(metric, http1Factory); if (proxyProtocolConfig.enabled()) { if (proxyProtocolConfig.mixedMode()) { ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory); DetectorConnectionFactory detectorFactory = newDetectorConnectionFactory(sslFactory, proxyProtocolFactory); return List.of(detectorFactory, proxyProtocolFactory, sslFactory, http1Factory); } else { ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory); return List.of(proxyProtocolFactory, sslFactory, http1Factory); } } else { return List.of(sslFactory, http1Factory); } } }
ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory);
private List<ConnectionFactory> connectionFactoriesForHttps(Metric metric) { ConnectorConfig.ProxyProtocol proxyProtocolConfig = connectorConfig.proxyProtocol(); HttpConnectionFactory http1Factory = newHttp1ConnectionFactory(); if (connectorConfig.http2Enabled()) { HTTP2ServerConnectionFactory http2Factory = newHttp2ConnectionFactory(); ALPNServerConnectionFactory alpnFactory = newAlpnConnectionFactory(List.of(http1Factory, http2Factory), http1Factory); SslConnectionFactory sslFactory = newSslConnectionFactory(metric, alpnFactory); if (proxyProtocolConfig.enabled()) { if (proxyProtocolConfig.mixedMode()) { ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory); DetectorConnectionFactory detectorFactory = newDetectorConnectionFactory(sslFactory, proxyProtocolFactory); return List.of(detectorFactory, proxyProtocolFactory, sslFactory, alpnFactory, http1Factory, http2Factory); } else { ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory); return List.of(proxyProtocolFactory, sslFactory, alpnFactory, http1Factory, http2Factory); } } else { return List.of(sslFactory, alpnFactory, http1Factory, http2Factory); } } else { SslConnectionFactory sslFactory = newSslConnectionFactory(metric, http1Factory); if (proxyProtocolConfig.enabled()) { if (proxyProtocolConfig.mixedMode()) { ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory); DetectorConnectionFactory detectorFactory = newDetectorConnectionFactory(sslFactory, proxyProtocolFactory); return List.of(detectorFactory, proxyProtocolFactory, sslFactory, http1Factory); } else { ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory); return List.of(proxyProtocolFactory, sslFactory, http1Factory); } } else { return List.of(sslFactory, http1Factory); } } }
class ConnectorFactory { private final ConnectorConfig connectorConfig; private final SslContextFactoryProvider sslContextFactoryProvider; @Inject public ConnectorFactory(ConnectorConfig connectorConfig, SslContextFactoryProvider sslContextFactoryProvider) { runtimeConnectorConfigValidation(connectorConfig); this.connectorConfig = connectorConfig; this.sslContextFactoryProvider = sslContextFactoryProvider; } private static void runtimeConnectorConfigValidation(ConnectorConfig config) { validateProxyProtocolConfiguration(config); validateSecureRedirectConfig(config); } private static void validateProxyProtocolConfiguration(ConnectorConfig config) { ConnectorConfig.ProxyProtocol proxyProtocolConfig = config.proxyProtocol(); if (proxyProtocolConfig.enabled()) { boolean tlsMixedModeEnabled = TransportSecurityUtils.getInsecureMixedMode() != MixedMode.DISABLED; if (!isSslEffectivelyEnabled(config) || tlsMixedModeEnabled) { throw new IllegalArgumentException("Proxy protocol can only be enabled if connector is effectively HTTPS only"); } } } private static void validateSecureRedirectConfig(ConnectorConfig config) { if (config.secureRedirect().enabled() && isSslEffectivelyEnabled(config)) { throw new IllegalArgumentException("Secure redirect can only be enabled on connectors without HTTPS"); } } public ConnectorConfig getConnectorConfig() { return connectorConfig; } public ServerConnector createConnector(final Metric metric, final Server server, JettyConnectionLogger connectionLogger) { ServerConnector connector = new JDiscServerConnector( connectorConfig, metric, server, connectionLogger, createConnectionFactories(metric).toArray(ConnectionFactory[]::new)); connector.setPort(connectorConfig.listenPort()); connector.setName(connectorConfig.name()); connector.setAcceptQueueSize(connectorConfig.acceptQueueSize()); connector.setReuseAddress(connectorConfig.reuseAddress()); connector.setIdleTimeout((long)(connectorConfig.idleTimeout() * 1000.0)); return connector; } private 
List<ConnectionFactory> createConnectionFactories(Metric metric) { if (!isSslEffectivelyEnabled(connectorConfig)) { return List.of(newHttp1ConnectionFactory()); } else if (connectorConfig.ssl().enabled()) { return connectionFactoriesForHttps(metric); } else if (TransportSecurityUtils.isTransportSecurityEnabled()) { switch (TransportSecurityUtils.getInsecureMixedMode()) { case TLS_CLIENT_MIXED_SERVER: case PLAINTEXT_CLIENT_MIXED_SERVER: return connectionFactoriesForHttpsMixedMode(metric); case DISABLED: return connectionFactoriesForHttps(metric); default: throw new IllegalStateException(); } } else { return List.of(newHttp1ConnectionFactory()); } } private List<ConnectionFactory> connectionFactoriesForHttpsMixedMode(Metric metric) { HttpConnectionFactory httpFactory = newHttp1ConnectionFactory(); SslConnectionFactory sslFactory = newSslConnectionFactory(metric, httpFactory); DetectorConnectionFactory detectorFactory = newDetectorConnectionFactory(sslFactory); return List.of(detectorFactory, httpFactory, sslFactory); } private HttpConfiguration newHttpConfiguration() { HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSendDateHeader(true); httpConfig.setSendServerVersion(false); httpConfig.setSendXPoweredBy(false); httpConfig.setHeaderCacheSize(connectorConfig.headerCacheSize()); httpConfig.setOutputBufferSize(connectorConfig.outputBufferSize()); httpConfig.setRequestHeaderSize(connectorConfig.requestHeaderSize()); httpConfig.setResponseHeaderSize(connectorConfig.responseHeaderSize()); if (isSslEffectivelyEnabled(connectorConfig)) { httpConfig.addCustomizer(new SecureRequestCustomizer()); } return httpConfig; } private HttpConnectionFactory newHttp1ConnectionFactory() { return new HttpConnectionFactory(newHttpConfiguration()); } private HTTP2ServerConnectionFactory newHttp2ConnectionFactory() { return new HTTP2ServerConnectionFactory(newHttpConfiguration()); } private SslConnectionFactory newSslConnectionFactory(Metric metric, ConnectionFactory 
wrappedFactory) { SslContextFactory ctxFactory = sslContextFactoryProvider.getInstance(connectorConfig.name(), connectorConfig.listenPort()); SslConnectionFactory connectionFactory = new SslConnectionFactory(ctxFactory, wrappedFactory.getProtocol()); connectionFactory.addBean(new SslHandshakeFailedListener(metric, connectorConfig.name(), connectorConfig.listenPort())); return connectionFactory; } private ALPNServerConnectionFactory newAlpnConnectionFactory(Collection<ConnectionFactory> alternatives, ConnectionFactory defaultFactory) { String[] protocols = alternatives.stream().map(ConnectionFactory::getProtocol).toArray(String[]::new); ALPNServerConnectionFactory factory = new ALPNServerConnectionFactory(protocols); factory.setDefaultProtocol(defaultFactory.getProtocol()); return factory; } private DetectorConnectionFactory newDetectorConnectionFactory(ConnectionFactory.Detecting... alternatives) { return new DetectorConnectionFactory(alternatives); } private ProxyConnectionFactory newProxyProtocolConnectionFactory(ConnectionFactory wrappedFactory) { return new ProxyConnectionFactory(wrappedFactory.getProtocol()); } private static boolean isSslEffectivelyEnabled(ConnectorConfig config) { return config.ssl().enabled() || (config.implicitTlsEnabled() && TransportSecurityUtils.isTransportSecurityEnabled()); } }
class ConnectorFactory { private final ConnectorConfig connectorConfig; private final SslContextFactoryProvider sslContextFactoryProvider; @Inject public ConnectorFactory(ConnectorConfig connectorConfig, SslContextFactoryProvider sslContextFactoryProvider) { runtimeConnectorConfigValidation(connectorConfig); this.connectorConfig = connectorConfig; this.sslContextFactoryProvider = sslContextFactoryProvider; } private static void runtimeConnectorConfigValidation(ConnectorConfig config) { validateProxyProtocolConfiguration(config); validateSecureRedirectConfig(config); } private static void validateProxyProtocolConfiguration(ConnectorConfig config) { ConnectorConfig.ProxyProtocol proxyProtocolConfig = config.proxyProtocol(); if (proxyProtocolConfig.enabled()) { boolean tlsMixedModeEnabled = TransportSecurityUtils.getInsecureMixedMode() != MixedMode.DISABLED; if (!isSslEffectivelyEnabled(config) || tlsMixedModeEnabled) { throw new IllegalArgumentException("Proxy protocol can only be enabled if connector is effectively HTTPS only"); } } } private static void validateSecureRedirectConfig(ConnectorConfig config) { if (config.secureRedirect().enabled() && isSslEffectivelyEnabled(config)) { throw new IllegalArgumentException("Secure redirect can only be enabled on connectors without HTTPS"); } } public ConnectorConfig getConnectorConfig() { return connectorConfig; } public ServerConnector createConnector(final Metric metric, final Server server, JettyConnectionLogger connectionLogger) { ServerConnector connector = new JDiscServerConnector( connectorConfig, metric, server, connectionLogger, createConnectionFactories(metric).toArray(ConnectionFactory[]::new)); connector.setPort(connectorConfig.listenPort()); connector.setName(connectorConfig.name()); connector.setAcceptQueueSize(connectorConfig.acceptQueueSize()); connector.setReuseAddress(connectorConfig.reuseAddress()); connector.setIdleTimeout((long)(connectorConfig.idleTimeout() * 1000.0)); return connector; } private 
List<ConnectionFactory> createConnectionFactories(Metric metric) { if (!isSslEffectivelyEnabled(connectorConfig)) { return List.of(newHttp1ConnectionFactory()); } else if (connectorConfig.ssl().enabled()) { return connectionFactoriesForHttps(metric); } else if (TransportSecurityUtils.isTransportSecurityEnabled()) { switch (TransportSecurityUtils.getInsecureMixedMode()) { case TLS_CLIENT_MIXED_SERVER: case PLAINTEXT_CLIENT_MIXED_SERVER: return connectionFactoriesForHttpsMixedMode(metric); case DISABLED: return connectionFactoriesForHttps(metric); default: throw new IllegalStateException(); } } else { return List.of(newHttp1ConnectionFactory()); } } private List<ConnectionFactory> connectionFactoriesForHttpsMixedMode(Metric metric) { HttpConnectionFactory httpFactory = newHttp1ConnectionFactory(); SslConnectionFactory sslFactory = newSslConnectionFactory(metric, httpFactory); DetectorConnectionFactory detectorFactory = newDetectorConnectionFactory(sslFactory); return List.of(detectorFactory, httpFactory, sslFactory); } private HttpConfiguration newHttpConfiguration() { HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSendDateHeader(true); httpConfig.setSendServerVersion(false); httpConfig.setSendXPoweredBy(false); httpConfig.setHeaderCacheSize(connectorConfig.headerCacheSize()); httpConfig.setOutputBufferSize(connectorConfig.outputBufferSize()); httpConfig.setRequestHeaderSize(connectorConfig.requestHeaderSize()); httpConfig.setResponseHeaderSize(connectorConfig.responseHeaderSize()); if (isSslEffectivelyEnabled(connectorConfig)) { httpConfig.addCustomizer(new SecureRequestCustomizer()); } return httpConfig; } private HttpConnectionFactory newHttp1ConnectionFactory() { return new HttpConnectionFactory(newHttpConfiguration()); } private HTTP2ServerConnectionFactory newHttp2ConnectionFactory() { return new HTTP2ServerConnectionFactory(newHttpConfiguration()); } private SslConnectionFactory newSslConnectionFactory(Metric metric, ConnectionFactory 
wrappedFactory) { SslContextFactory ctxFactory = sslContextFactoryProvider.getInstance(connectorConfig.name(), connectorConfig.listenPort()); SslConnectionFactory connectionFactory = new SslConnectionFactory(ctxFactory, wrappedFactory.getProtocol()); connectionFactory.addBean(new SslHandshakeFailedListener(metric, connectorConfig.name(), connectorConfig.listenPort())); return connectionFactory; } private ALPNServerConnectionFactory newAlpnConnectionFactory(Collection<ConnectionFactory> alternatives, ConnectionFactory defaultFactory) { String[] protocols = alternatives.stream().map(ConnectionFactory::getProtocol).toArray(String[]::new); ALPNServerConnectionFactory factory = new ALPNServerConnectionFactory(protocols); factory.setDefaultProtocol(defaultFactory.getProtocol()); return factory; } private DetectorConnectionFactory newDetectorConnectionFactory(ConnectionFactory.Detecting... alternatives) { return new DetectorConnectionFactory(alternatives); } private ProxyConnectionFactory newProxyProtocolConnectionFactory(ConnectionFactory wrappedFactory) { return new ProxyConnectionFactory(wrappedFactory.getProtocol()); } private static boolean isSslEffectivelyEnabled(ConnectorConfig config) { return config.ssl().enabled() || (config.implicitTlsEnabled() && TransportSecurityUtils.isTransportSecurityEnabled()); } }
This method is now called unconditionally wrt. `lastSucceeded`, can this have any unintended side effects?
private void handleUpdate() { if (reqDone) { reqDone = false; boolean logOnSuccess = false; boolean logOnFailure = true; synchronized (this) { if (req.methodName().equals(UNREGISTER_METHOD_NAME)) { logOnSuccess = true; lastRegisterSucceeded.remove(name); } else { final Boolean lastSucceeded = lastRegisterSucceeded.get(name); if (lastSucceeded == null) { logOnSuccess = true; logOnFailure = false; } else if (lastSucceeded != !req.isError()) { logOnSuccess = true; } lastRegisterSucceeded.put(name, !req.isError()); } } if (req.isError()) { if (req.errorCode() != ErrorCode.METHOD_FAILED) { if (logOnFailure) { log.log(Level.INFO, logMessagePrefix() + " failed, will disconnect: " + req.errorMessage() + " (code " + req.errorCode() + ")"); } target.close(); target = null; } else { log.log(Level.WARNING, logMessagePrefix() + " failed: " + req.errorMessage()); } } else { log.log(logOnSuccess ? Level.INFO : Level.FINE, logMessagePrefix() + " completed successfully"); backOff.reset(); } req = null; name = null; } if (req != null) { log.log(Level.FINEST, "req in progress"); return; } if (target != null && ! 
slobroks.contains(currSlobrok)) { log.log(Level.INFO, "[RPC @ " + mySpec + "] location broker " + currSlobrok + " removed, will disconnect and use one of: "+slobroks); target.close(); target = null; } if (target == null) { currSlobrok = slobroks.nextSlobrokSpec(); if (currSlobrok == null) { double delay = backOff.get(); Level level = Level.FINE; if (backOff.shouldInform(delay)) level = Level.INFO; if (backOff.shouldWarn(delay)) level = Level.WARNING; log.log(level, "[RPC @ " + mySpec + "] no location brokers available, retrying: "+slobroks+" (in " + delay + " seconds)"); updateTask.schedule(delay); return; } lastRegisterSucceeded.clear(); target = orb.connect(new Spec(currSlobrok)); String namesString = null; final boolean logFine = log.isLoggable(Level.FINE); synchronized (this) { if (logFine) { namesString = names.toString(); } pending.clear(); pending.addAll(names); } if (logFine) { log.log(Level.FINE, "[RPC @ " + mySpec + "] Connect to location broker " + currSlobrok + " and reregister all service names: " + namesString); } } synchronized (this) { if (unreg.size() > 0) { name = unreg.remove(unreg.size() - 1); req = new Request(UNREGISTER_METHOD_NAME); } else if (pending.size() > 0) { name = pending.remove(pending.size() - 1); req = new Request(REGISTER_METHOD_NAME); } else { pending.addAll(names); log.log(Level.FINE, "[RPC @ " + mySpec + "] Reregister all service names in 30 seconds: " + names); updateTask.schedule(30.0); return; } } req.parameters().add(new StringValue(name)); req.parameters().add(new StringValue(mySpec)); log.log(Level.FINE, logMessagePrefix() + " now"); target.invokeAsync(req, 35.0, reqWait); }
lastRegisterSucceeded.put(name, !req.isError());
private void handleUpdate() { if (reqDone) { reqDone = false; boolean logOnSuccess = false; boolean logOnFailure = true; synchronized (this) { if (req.methodName().equals(UNREGISTER_METHOD_NAME)) { logOnSuccess = true; lastRegisterSucceeded.remove(name); } else { final Boolean lastSucceeded = lastRegisterSucceeded.get(name); if (lastSucceeded == null) { logOnSuccess = true; logOnFailure = false; } else if (lastSucceeded != !req.isError()) { logOnSuccess = true; } lastRegisterSucceeded.put(name, !req.isError()); } } if (req.isError()) { if (req.errorCode() != ErrorCode.METHOD_FAILED) { if (logOnFailure) { log.log(Level.INFO, logMessagePrefix() + " failed, will disconnect: " + req.errorMessage() + " (code " + req.errorCode() + ")"); } target.close(); target = null; } else { log.log(Level.WARNING, logMessagePrefix() + " failed: " + req.errorMessage()); } } else { log.log(logOnSuccess ? Level.INFO : Level.FINE, logMessagePrefix() + " completed successfully"); backOff.reset(); } req = null; name = null; } if (req != null) { log.log(Level.FINEST, "req in progress"); return; } if (target != null && ! 
slobroks.contains(currSlobrok)) { log.log(Level.INFO, "[RPC @ " + mySpec + "] location broker " + currSlobrok + " removed, will disconnect and use one of: "+slobroks); target.close(); target = null; } if (target == null) { currSlobrok = slobroks.nextSlobrokSpec(); if (currSlobrok == null) { double delay = backOff.get(); Level level = Level.FINE; if (backOff.shouldInform(delay)) level = Level.INFO; if (backOff.shouldWarn(delay)) level = Level.WARNING; log.log(level, "[RPC @ " + mySpec + "] no location brokers available, retrying: "+slobroks+" (in " + delay + " seconds)"); updateTask.schedule(delay); return; } lastRegisterSucceeded.clear(); target = orb.connect(new Spec(currSlobrok)); String namesString = null; final boolean logFine = log.isLoggable(Level.FINE); synchronized (this) { if (logFine) { namesString = names.toString(); } pending.clear(); pending.addAll(names); } if (logFine) { log.log(Level.FINE, "[RPC @ " + mySpec + "] Connect to location broker " + currSlobrok + " and reregister all service names: " + namesString); } } synchronized (this) { if (unreg.size() > 0) { name = unreg.remove(unreg.size() - 1); req = new Request(UNREGISTER_METHOD_NAME); } else if (pending.size() > 0) { name = pending.remove(pending.size() - 1); req = new Request(REGISTER_METHOD_NAME); } else { pending.addAll(names); log.log(Level.FINE, "[RPC @ " + mySpec + "] Reregister all service names in 30 seconds: " + names); updateTask.schedule(30.0); return; } } req.parameters().add(new StringValue(name)); req.parameters().add(new StringValue(mySpec)); log.log(Level.FINE, logMessagePrefix() + " now"); target.invokeAsync(req, 35.0, reqWait); }
class Register { private static Logger log = Logger.getLogger(Register.class.getName()); private static final String REGISTER_METHOD_NAME = "slobrok.registerRpcServer"; private static final String UNREGISTER_METHOD_NAME = "slobrok.unregisterRpcServer"; private Supervisor orb; private SlobrokList slobroks; private String currSlobrok; private final String mySpec; private BackOffPolicy backOff; private boolean reqDone = false; private List<String> names = new ArrayList<>(); private List<String> pending = new ArrayList<>(); private List<String> unreg = new ArrayList<>(); private final TransportThread transportThread; private Task updateTask = null; private RequestWaiter reqWait = null; private Target target = null; private Request req = null; private String name = null; private Method m_list = null; private Method m_unreg = null; /** Whether the last registerRpcServer for the name was a success, or null for the first. */ private final Map<String, Boolean> lastRegisterSucceeded = new HashMap<>(); /** * Remove all instances of name from list. 
*/ private void discard(List<String> list, String name) { List<String> tmp = new ArrayList<>(); tmp.add(name); list.removeAll(tmp); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host * @param bop custom backoff policy, mostly useful for testing */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec, BackOffPolicy bop) { this.orb = orb; this.slobroks = slobroks; this.backOff = bop; mySpec = spec.toString(); transportThread = orb.transport().selectThread(); updateTask = transportThread.createTask(this::handleUpdate); reqWait = new RequestWaiter() { public void handleRequestDone(Request req) { reqDone = true; updateTask.scheduleNow(); } }; m_list = new Method("slobrok.callback.listNamesServed", "", "S", new MethodHandler() { public void invoke(Request req) { handleRpcList(req); } }) .methodDesc("List rpcserver names") .returnDesc(0, "names", "The rpcserver names this server wants to serve"); orb.addMethod(m_list); m_unreg = new Method("slobrok.callback.notifyUnregistered", "s", "", new MethodHandler() { public void invoke(Request req) { handleRpcUnreg(req); } }) .methodDesc("Notify a server about removed registration") .paramDesc(0, "name", "RpcServer name"); orb.addMethod(m_unreg); updateTask.scheduleNow(); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec) { this(orb, slobroks, spec, new BackOff()); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param myHost the 
hostname of this host * @param myPort the port number we are listening to */ public Register(Supervisor orb, SlobrokList slobroks, String myHost, int myPort) { this(orb, slobroks, new Spec(myHost, myPort)); } /** * Shut down the Register. This will close any open connections * and stop the regular re-registration. */ public void shutdown() { updateTask.kill(); transportThread.perform(this::handleShutdown); } /** * Register a service with the slobrok cluster. * * @param name service name */ public synchronized void registerName(String name) { if (names.indexOf(name) >= 0) { return; } names.add(name); pending.add(name); discard(unreg, name); updateTask.scheduleNow(); } /** * Unregister a service with the slobrok cluster * * @param name service name */ public synchronized void unregisterName(String name) { discard(names, name); discard(pending, name); unreg.add(name); updateTask.scheduleNow(); } /** * Invoked by the update task. **/ private String logMessagePrefix() { return "[RPC @ " + mySpec + "] " + (req.methodName().equals(UNREGISTER_METHOD_NAME) ? "unregistering " : "registering ") + name + " with location broker " + currSlobrok; } private synchronized void handleRpcList(Request req) { Values dst = req.returnValues(); dst.add(new StringArray(names.toArray(new String[names.size()]))); } private void handleRpcUnreg(Request req) { log.log(Level.WARNING, "unregistered name " + req.parameters().get(0).asString()); } /** * Invoked from the transport thread, requested by the shutdown * method. **/ private void handleShutdown() { orb.removeMethod(m_list); orb.removeMethod(m_unreg); if (req != null) { req.abort(); req = null; } if (target != null) { target.close(); target = null; } } }
class Register { private static Logger log = Logger.getLogger(Register.class.getName()); private static final String REGISTER_METHOD_NAME = "slobrok.registerRpcServer"; private static final String UNREGISTER_METHOD_NAME = "slobrok.unregisterRpcServer"; private Supervisor orb; private SlobrokList slobroks; private String currSlobrok; private final String mySpec; private BackOffPolicy backOff; private boolean reqDone = false; private List<String> names = new ArrayList<>(); private List<String> pending = new ArrayList<>(); private List<String> unreg = new ArrayList<>(); private final TransportThread transportThread; private Task updateTask = null; private RequestWaiter reqWait = null; private Target target = null; private Request req = null; private String name = null; private Method m_list = null; private Method m_unreg = null; /** Whether the last registerRpcServer for the name was a success, or null for the first. */ private final Map<String, Boolean> lastRegisterSucceeded = new HashMap<>(); /** * Remove all instances of name from list. 
*/ private void discard(List<String> list, String name) { List<String> tmp = new ArrayList<>(); tmp.add(name); list.removeAll(tmp); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host * @param bop custom backoff policy, mostly useful for testing */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec, BackOffPolicy bop) { this.orb = orb; this.slobroks = slobroks; this.backOff = bop; mySpec = spec.toString(); transportThread = orb.transport().selectThread(); updateTask = transportThread.createTask(this::handleUpdate); reqWait = new RequestWaiter() { public void handleRequestDone(Request req) { reqDone = true; updateTask.scheduleNow(); } }; m_list = new Method("slobrok.callback.listNamesServed", "", "S", new MethodHandler() { public void invoke(Request req) { handleRpcList(req); } }) .methodDesc("List rpcserver names") .returnDesc(0, "names", "The rpcserver names this server wants to serve"); orb.addMethod(m_list); m_unreg = new Method("slobrok.callback.notifyUnregistered", "s", "", new MethodHandler() { public void invoke(Request req) { handleRpcUnreg(req); } }) .methodDesc("Notify a server about removed registration") .paramDesc(0, "name", "RpcServer name"); orb.addMethod(m_unreg); updateTask.scheduleNow(); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec) { this(orb, slobroks, spec, new BackOff()); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param myHost the 
hostname of this host * @param myPort the port number we are listening to */ public Register(Supervisor orb, SlobrokList slobroks, String myHost, int myPort) { this(orb, slobroks, new Spec(myHost, myPort)); } /** * Shut down the Register. This will close any open connections * and stop the regular re-registration. */ public void shutdown() { updateTask.kill(); transportThread.perform(this::handleShutdown); } /** * Register a service with the slobrok cluster. * * @param name service name */ public synchronized void registerName(String name) { if (names.indexOf(name) >= 0) { return; } names.add(name); pending.add(name); discard(unreg, name); updateTask.scheduleNow(); } /** * Unregister a service with the slobrok cluster * * @param name service name */ public synchronized void unregisterName(String name) { discard(names, name); discard(pending, name); unreg.add(name); updateTask.scheduleNow(); } /** * Invoked by the update task. **/ private String logMessagePrefix() { return "[RPC @ " + mySpec + "] " + (req.methodName().equals(UNREGISTER_METHOD_NAME) ? "unregistering " : "registering ") + name + " with location broker " + currSlobrok; } private synchronized void handleRpcList(Request req) { Values dst = req.returnValues(); dst.add(new StringArray(names.toArray(new String[names.size()]))); } private void handleRpcUnreg(Request req) { log.log(Level.WARNING, "unregistered name " + req.parameters().get(0).asString()); } /** * Invoked from the transport thread, requested by the shutdown * method. **/ private void handleShutdown() { orb.removeMethod(m_list); orb.removeMethod(m_unreg); if (req != null) { req.abort(); req = null; } if (target != null) { target.close(); target = null; } } }
> This method is now called unconditionally wrt. lastSucceeded, can this have any unintended side effects? no - if it already had the value from before, we will overwrite with same value, to avoid duplicating this line.
private void handleUpdate() { if (reqDone) { reqDone = false; boolean logOnSuccess = false; boolean logOnFailure = true; synchronized (this) { if (req.methodName().equals(UNREGISTER_METHOD_NAME)) { logOnSuccess = true; lastRegisterSucceeded.remove(name); } else { final Boolean lastSucceeded = lastRegisterSucceeded.get(name); if (lastSucceeded == null) { logOnSuccess = true; logOnFailure = false; } else if (lastSucceeded != !req.isError()) { logOnSuccess = true; } lastRegisterSucceeded.put(name, !req.isError()); } } if (req.isError()) { if (req.errorCode() != ErrorCode.METHOD_FAILED) { if (logOnFailure) { log.log(Level.INFO, logMessagePrefix() + " failed, will disconnect: " + req.errorMessage() + " (code " + req.errorCode() + ")"); } target.close(); target = null; } else { log.log(Level.WARNING, logMessagePrefix() + " failed: " + req.errorMessage()); } } else { log.log(logOnSuccess ? Level.INFO : Level.FINE, logMessagePrefix() + " completed successfully"); backOff.reset(); } req = null; name = null; } if (req != null) { log.log(Level.FINEST, "req in progress"); return; } if (target != null && ! 
slobroks.contains(currSlobrok)) { log.log(Level.INFO, "[RPC @ " + mySpec + "] location broker " + currSlobrok + " removed, will disconnect and use one of: "+slobroks); target.close(); target = null; } if (target == null) { currSlobrok = slobroks.nextSlobrokSpec(); if (currSlobrok == null) { double delay = backOff.get(); Level level = Level.FINE; if (backOff.shouldInform(delay)) level = Level.INFO; if (backOff.shouldWarn(delay)) level = Level.WARNING; log.log(level, "[RPC @ " + mySpec + "] no location brokers available, retrying: "+slobroks+" (in " + delay + " seconds)"); updateTask.schedule(delay); return; } lastRegisterSucceeded.clear(); target = orb.connect(new Spec(currSlobrok)); String namesString = null; final boolean logFine = log.isLoggable(Level.FINE); synchronized (this) { if (logFine) { namesString = names.toString(); } pending.clear(); pending.addAll(names); } if (logFine) { log.log(Level.FINE, "[RPC @ " + mySpec + "] Connect to location broker " + currSlobrok + " and reregister all service names: " + namesString); } } synchronized (this) { if (unreg.size() > 0) { name = unreg.remove(unreg.size() - 1); req = new Request(UNREGISTER_METHOD_NAME); } else if (pending.size() > 0) { name = pending.remove(pending.size() - 1); req = new Request(REGISTER_METHOD_NAME); } else { pending.addAll(names); log.log(Level.FINE, "[RPC @ " + mySpec + "] Reregister all service names in 30 seconds: " + names); updateTask.schedule(30.0); return; } } req.parameters().add(new StringValue(name)); req.parameters().add(new StringValue(mySpec)); log.log(Level.FINE, logMessagePrefix() + " now"); target.invokeAsync(req, 35.0, reqWait); }
lastRegisterSucceeded.put(name, !req.isError());
private void handleUpdate() { if (reqDone) { reqDone = false; boolean logOnSuccess = false; boolean logOnFailure = true; synchronized (this) { if (req.methodName().equals(UNREGISTER_METHOD_NAME)) { logOnSuccess = true; lastRegisterSucceeded.remove(name); } else { final Boolean lastSucceeded = lastRegisterSucceeded.get(name); if (lastSucceeded == null) { logOnSuccess = true; logOnFailure = false; } else if (lastSucceeded != !req.isError()) { logOnSuccess = true; } lastRegisterSucceeded.put(name, !req.isError()); } } if (req.isError()) { if (req.errorCode() != ErrorCode.METHOD_FAILED) { if (logOnFailure) { log.log(Level.INFO, logMessagePrefix() + " failed, will disconnect: " + req.errorMessage() + " (code " + req.errorCode() + ")"); } target.close(); target = null; } else { log.log(Level.WARNING, logMessagePrefix() + " failed: " + req.errorMessage()); } } else { log.log(logOnSuccess ? Level.INFO : Level.FINE, logMessagePrefix() + " completed successfully"); backOff.reset(); } req = null; name = null; } if (req != null) { log.log(Level.FINEST, "req in progress"); return; } if (target != null && ! 
slobroks.contains(currSlobrok)) { log.log(Level.INFO, "[RPC @ " + mySpec + "] location broker " + currSlobrok + " removed, will disconnect and use one of: "+slobroks); target.close(); target = null; } if (target == null) { currSlobrok = slobroks.nextSlobrokSpec(); if (currSlobrok == null) { double delay = backOff.get(); Level level = Level.FINE; if (backOff.shouldInform(delay)) level = Level.INFO; if (backOff.shouldWarn(delay)) level = Level.WARNING; log.log(level, "[RPC @ " + mySpec + "] no location brokers available, retrying: "+slobroks+" (in " + delay + " seconds)"); updateTask.schedule(delay); return; } lastRegisterSucceeded.clear(); target = orb.connect(new Spec(currSlobrok)); String namesString = null; final boolean logFine = log.isLoggable(Level.FINE); synchronized (this) { if (logFine) { namesString = names.toString(); } pending.clear(); pending.addAll(names); } if (logFine) { log.log(Level.FINE, "[RPC @ " + mySpec + "] Connect to location broker " + currSlobrok + " and reregister all service names: " + namesString); } } synchronized (this) { if (unreg.size() > 0) { name = unreg.remove(unreg.size() - 1); req = new Request(UNREGISTER_METHOD_NAME); } else if (pending.size() > 0) { name = pending.remove(pending.size() - 1); req = new Request(REGISTER_METHOD_NAME); } else { pending.addAll(names); log.log(Level.FINE, "[RPC @ " + mySpec + "] Reregister all service names in 30 seconds: " + names); updateTask.schedule(30.0); return; } } req.parameters().add(new StringValue(name)); req.parameters().add(new StringValue(mySpec)); log.log(Level.FINE, logMessagePrefix() + " now"); target.invokeAsync(req, 35.0, reqWait); }
class Register { private static Logger log = Logger.getLogger(Register.class.getName()); private static final String REGISTER_METHOD_NAME = "slobrok.registerRpcServer"; private static final String UNREGISTER_METHOD_NAME = "slobrok.unregisterRpcServer"; private Supervisor orb; private SlobrokList slobroks; private String currSlobrok; private final String mySpec; private BackOffPolicy backOff; private boolean reqDone = false; private List<String> names = new ArrayList<>(); private List<String> pending = new ArrayList<>(); private List<String> unreg = new ArrayList<>(); private final TransportThread transportThread; private Task updateTask = null; private RequestWaiter reqWait = null; private Target target = null; private Request req = null; private String name = null; private Method m_list = null; private Method m_unreg = null; /** Whether the last registerRpcServer for the name was a success, or null for the first. */ private final Map<String, Boolean> lastRegisterSucceeded = new HashMap<>(); /** * Remove all instances of name from list. 
*/ private void discard(List<String> list, String name) { List<String> tmp = new ArrayList<>(); tmp.add(name); list.removeAll(tmp); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host * @param bop custom backoff policy, mostly useful for testing */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec, BackOffPolicy bop) { this.orb = orb; this.slobroks = slobroks; this.backOff = bop; mySpec = spec.toString(); transportThread = orb.transport().selectThread(); updateTask = transportThread.createTask(this::handleUpdate); reqWait = new RequestWaiter() { public void handleRequestDone(Request req) { reqDone = true; updateTask.scheduleNow(); } }; m_list = new Method("slobrok.callback.listNamesServed", "", "S", new MethodHandler() { public void invoke(Request req) { handleRpcList(req); } }) .methodDesc("List rpcserver names") .returnDesc(0, "names", "The rpcserver names this server wants to serve"); orb.addMethod(m_list); m_unreg = new Method("slobrok.callback.notifyUnregistered", "s", "", new MethodHandler() { public void invoke(Request req) { handleRpcUnreg(req); } }) .methodDesc("Notify a server about removed registration") .paramDesc(0, "name", "RpcServer name"); orb.addMethod(m_unreg); updateTask.scheduleNow(); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec) { this(orb, slobroks, spec, new BackOff()); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param myHost the 
hostname of this host * @param myPort the port number we are listening to */ public Register(Supervisor orb, SlobrokList slobroks, String myHost, int myPort) { this(orb, slobroks, new Spec(myHost, myPort)); } /** * Shut down the Register. This will close any open connections * and stop the regular re-registration. */ public void shutdown() { updateTask.kill(); transportThread.perform(this::handleShutdown); } /** * Register a service with the slobrok cluster. * * @param name service name */ public synchronized void registerName(String name) { if (names.indexOf(name) >= 0) { return; } names.add(name); pending.add(name); discard(unreg, name); updateTask.scheduleNow(); } /** * Unregister a service with the slobrok cluster * * @param name service name */ public synchronized void unregisterName(String name) { discard(names, name); discard(pending, name); unreg.add(name); updateTask.scheduleNow(); } /** * Invoked by the update task. **/ private String logMessagePrefix() { return "[RPC @ " + mySpec + "] " + (req.methodName().equals(UNREGISTER_METHOD_NAME) ? "unregistering " : "registering ") + name + " with location broker " + currSlobrok; } private synchronized void handleRpcList(Request req) { Values dst = req.returnValues(); dst.add(new StringArray(names.toArray(new String[names.size()]))); } private void handleRpcUnreg(Request req) { log.log(Level.WARNING, "unregistered name " + req.parameters().get(0).asString()); } /** * Invoked from the transport thread, requested by the shutdown * method. **/ private void handleShutdown() { orb.removeMethod(m_list); orb.removeMethod(m_unreg); if (req != null) { req.abort(); req = null; } if (target != null) { target.close(); target = null; } } }
class Register { private static Logger log = Logger.getLogger(Register.class.getName()); private static final String REGISTER_METHOD_NAME = "slobrok.registerRpcServer"; private static final String UNREGISTER_METHOD_NAME = "slobrok.unregisterRpcServer"; private Supervisor orb; private SlobrokList slobroks; private String currSlobrok; private final String mySpec; private BackOffPolicy backOff; private boolean reqDone = false; private List<String> names = new ArrayList<>(); private List<String> pending = new ArrayList<>(); private List<String> unreg = new ArrayList<>(); private final TransportThread transportThread; private Task updateTask = null; private RequestWaiter reqWait = null; private Target target = null; private Request req = null; private String name = null; private Method m_list = null; private Method m_unreg = null; /** Whether the last registerRpcServer for the name was a success, or null for the first. */ private final Map<String, Boolean> lastRegisterSucceeded = new HashMap<>(); /** * Remove all instances of name from list. 
*/ private void discard(List<String> list, String name) { List<String> tmp = new ArrayList<>(); tmp.add(name); list.removeAll(tmp); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host * @param bop custom backoff policy, mostly useful for testing */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec, BackOffPolicy bop) { this.orb = orb; this.slobroks = slobroks; this.backOff = bop; mySpec = spec.toString(); transportThread = orb.transport().selectThread(); updateTask = transportThread.createTask(this::handleUpdate); reqWait = new RequestWaiter() { public void handleRequestDone(Request req) { reqDone = true; updateTask.scheduleNow(); } }; m_list = new Method("slobrok.callback.listNamesServed", "", "S", new MethodHandler() { public void invoke(Request req) { handleRpcList(req); } }) .methodDesc("List rpcserver names") .returnDesc(0, "names", "The rpcserver names this server wants to serve"); orb.addMethod(m_list); m_unreg = new Method("slobrok.callback.notifyUnregistered", "s", "", new MethodHandler() { public void invoke(Request req) { handleRpcUnreg(req); } }) .methodDesc("Notify a server about removed registration") .paramDesc(0, "name", "RpcServer name"); orb.addMethod(m_unreg); updateTask.scheduleNow(); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec) { this(orb, slobroks, spec, new BackOff()); } /** * Create a new Register using the given Supervisor, slobrok * connect specs, hostname and port * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param myHost the 
hostname of this host * @param myPort the port number we are listening to */ public Register(Supervisor orb, SlobrokList slobroks, String myHost, int myPort) { this(orb, slobroks, new Spec(myHost, myPort)); } /** * Shut down the Register. This will close any open connections * and stop the regular re-registration. */ public void shutdown() { updateTask.kill(); transportThread.perform(this::handleShutdown); } /** * Register a service with the slobrok cluster. * * @param name service name */ public synchronized void registerName(String name) { if (names.indexOf(name) >= 0) { return; } names.add(name); pending.add(name); discard(unreg, name); updateTask.scheduleNow(); } /** * Unregister a service with the slobrok cluster * * @param name service name */ public synchronized void unregisterName(String name) { discard(names, name); discard(pending, name); unreg.add(name); updateTask.scheduleNow(); } /** * Invoked by the update task. **/ private String logMessagePrefix() { return "[RPC @ " + mySpec + "] " + (req.methodName().equals(UNREGISTER_METHOD_NAME) ? "unregistering " : "registering ") + name + " with location broker " + currSlobrok; } private synchronized void handleRpcList(Request req) { Values dst = req.returnValues(); dst.add(new StringArray(names.toArray(new String[names.size()]))); } private void handleRpcUnreg(Request req) { log.log(Level.WARNING, "unregistered name " + req.parameters().get(0).asString()); } /** * Invoked from the transport thread, requested by the shutdown * method. **/ private void handleShutdown() { orb.removeMethod(m_list); orb.removeMethod(m_unreg); if (req != null) { req.abort(); req = null; } if (target != null) { target.close(); target = null; } } }
should maybe check `x[3]` vs `x{}` also
/**
 * Verifies the result type of TypeResolver.join for pairs of tensor types:
 * the result carries the union of the dimension sets; small cell types
 * (int8, bfloat16, float) promote to float while an untyped (double) operand
 * dominates; a dimension that is mapped in one operand and indexed in the
 * other resolves to the mapped form; incompatible bounded sizes fail.
 */
public void verifyJoin() {
    // mapped (sparse) dimensions: dimension-set union
    checkJoin("tensor()", "tensor()", "tensor()");
    checkJoin("tensor()", "tensor(x{})", "tensor(x{})");
    checkJoin("tensor(x{})", "tensor()", "tensor(x{})");
    checkJoin("tensor(x{})", "tensor(x{})", "tensor(x{})");
    checkJoin("tensor(x{})", "tensor(y{})", "tensor(x{},y{})");
    checkJoin("tensor(x{},y{})", "tensor(y{},z{})", "tensor(x{},y{},z{})");
    checkJoin("tensor(y{})", "tensor()", "tensor(y{})");
    checkJoin("tensor(y{})", "tensor(y{})", "tensor(y{})");
    // indexed (dense) dimensions
    checkJoin("tensor(a[10])", "tensor(a[10])", "tensor(a[10])");
    checkJoin("tensor(a[10])", "tensor()", "tensor(a[10])");
    checkJoin("tensor(a[10])", "tensor(x{},y{},z{})", "tensor(a[10],x{},y{},z{})");
    // cell-type promotion: bfloat16/int8 join to float; untyped (double) wins
    checkJoin("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<bfloat16>(x[5])", "tensor()", "tensor<float>(x[5])");
    checkJoin("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])");
    checkJoin("tensor<bfloat16>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<bfloat16>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<bfloat16>(x{})", "tensor()", "tensor<float>(x{})");
    checkJoin("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<float>(x[5])", "tensor()", "tensor<float>(x[5])");
    checkJoin("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])");
    checkJoin("tensor<float>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<float>(x{})", "tensor<float>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<float>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<float>(x{})", "tensor()", "tensor<float>(x{})");
    checkJoin("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<int8>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<int8>(x{})", "tensor()", "tensor<float>(x{})");
    checkJoin("tensor()", "tensor<int8>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])");
    checkJoin("tensor(x[5])", "tensor(x[5])", "tensor(x[5])");
    checkJoin("tensor(x{})", "tensor<bfloat16>(y{})", "tensor(x{},y{})");
    checkJoin("tensor(x{})", "tensor<float>(y{})", "tensor(x{},y{})");
    checkJoin("tensor(x{})", "tensor<int8>(y{})", "tensor(x{},y{})");
    // mixed indexed/mapped on the same dimension name resolves to mapped
    checkJoin("tensor(x[])", "tensor(x{})", "tensor(x{})");
    checkJoin("tensor(x[3])", "tensor(x{})", "tensor(x{})"); // added: bounded vs mapped
    checkJoin("tensor(x{})", "tensor(x[])", "tensor(x{})");
    checkJoin("tensor(x{})", "tensor(x[3])", "tensor(x{})"); // added: mapped vs bounded
    // incompatible bounded sizes must fail
    checkJoinFails("tensor(x[3])", "tensor(x[5])");
    checkJoinFails("tensor(x[5])", "tensor(x[3])");
}
checkJoin("tensor(x[])", "tensor(x{})", "tensor(x{})");
/**
 * Verifies the result type of TypeResolver.join for pairs of tensor types:
 * the result carries the union of the dimension sets; small cell types
 * (int8, bfloat16, float) promote to float while an untyped (double) operand
 * dominates; a dimension that is mapped in one operand and indexed in the
 * other resolves to the mapped form; incompatible bounded sizes fail.
 */
public void verifyJoin() {
    // mapped (sparse) dimensions: dimension-set union
    checkJoin("tensor()", "tensor()", "tensor()");
    checkJoin("tensor()", "tensor(x{})", "tensor(x{})");
    checkJoin("tensor(x{})", "tensor()", "tensor(x{})");
    checkJoin("tensor(x{})", "tensor(x{})", "tensor(x{})");
    checkJoin("tensor(x{})", "tensor(y{})", "tensor(x{},y{})");
    checkJoin("tensor(x{},y{})", "tensor(y{},z{})", "tensor(x{},y{},z{})");
    checkJoin("tensor(y{})", "tensor()", "tensor(y{})");
    checkJoin("tensor(y{})", "tensor(y{})", "tensor(y{})");
    // indexed (dense) dimensions
    checkJoin("tensor(a[10])", "tensor(a[10])", "tensor(a[10])");
    checkJoin("tensor(a[10])", "tensor()", "tensor(a[10])");
    checkJoin("tensor(a[10])", "tensor(x{},y{},z{})", "tensor(a[10],x{},y{},z{})");
    // cell-type promotion: bfloat16/int8 join to float; untyped (double) wins
    checkJoin("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<bfloat16>(x[5])", "tensor()", "tensor<float>(x[5])");
    checkJoin("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])");
    checkJoin("tensor<bfloat16>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<bfloat16>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<bfloat16>(x{})", "tensor()", "tensor<float>(x{})");
    checkJoin("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<float>(x[5])", "tensor()", "tensor<float>(x[5])");
    checkJoin("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])");
    checkJoin("tensor<float>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<float>(x{})", "tensor<float>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<float>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<float>(x{})", "tensor()", "tensor<float>(x{})");
    checkJoin("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor<int8>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})");
    checkJoin("tensor<int8>(x{})", "tensor()", "tensor<float>(x{})");
    checkJoin("tensor()", "tensor<int8>(x[5])", "tensor<float>(x[5])");
    checkJoin("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])");
    checkJoin("tensor(x[5])", "tensor(x[5])", "tensor(x[5])");
    checkJoin("tensor(x{})", "tensor<bfloat16>(y{})", "tensor(x{},y{})");
    checkJoin("tensor(x{})", "tensor<float>(y{})", "tensor(x{},y{})");
    checkJoin("tensor(x{})", "tensor<int8>(y{})", "tensor(x{},y{})");
    // mixed indexed/mapped on the same dimension name resolves to mapped,
    // for both unbound ([]) and bounded ([3]) indexed operands
    checkJoin("tensor(x[])", "tensor(x{})", "tensor(x{})");
    checkJoin("tensor(x[3])", "tensor(x{})", "tensor(x{})");
    checkJoin("tensor(x{})", "tensor(x[])", "tensor(x{})");
    checkJoin("tensor(x{})", "tensor(x[3])", "tensor(x{})");
    // incompatible bounded sizes must fail
    checkJoinFails("tensor(x[3])", "tensor(x[5])");
    checkJoinFails("tensor(x[5])", "tensor(x[3])");
}
class TypeResolverTestCase { private static List<String> mkl(String ...values) { return Arrays.asList(values); } @Test public void verifyMap() { checkMap("tensor()", "tensor()"); checkMap("tensor(x[10])", "tensor(x[10])"); checkMap("tensor(a[10],b[20],c[30])", "tensor(a[10],b[20],c[30])"); checkMap("tensor(y{})", "tensor(y{})"); checkMap("tensor(x[10],y{})", "tensor(x[10],y{})"); checkMap("tensor<float>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<float>(y{})", "tensor<float>(y{})"); checkMap("tensor<bfloat16>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMap("tensor<int8>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<int8>(y{})", "tensor<float>(y{})"); } @Test @Test public void verifyReduce() { checkFullReduce("tensor()"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkFullReduce("tensor(x[10],y[20],z[30])"); checkFullReduce("tensor<float>(x[10],y[20],z[30])"); checkFullReduce("tensor<bfloat16>(x[10],y[20],z[30])"); checkFullReduce("tensor<int8>(x[10],y[20],z[30])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); 
checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])"); checkReduce("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkFullReduce("tensor(x[10],y{},z[30])"); checkFullReduce("tensor<float>(x[10],y{},z[30])"); checkFullReduce("tensor<bfloat16>(x[10],y{},z[30])"); checkFullReduce("tensor<int8>(x[10],y{},z[30])"); checkReduce("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor()", mkl("x"), "tensor()"); checkReduce("tensor(y{})", mkl("x"), "tensor(y{})"); checkReduce("tensor<float>(y[10])", mkl("x"), "tensor<float>(y[10])"); checkReduce("tensor<int8>(y[10])", mkl("x"), "tensor<float>(y[10])"); } @Test public void verifyMerge() { checkMerge("tensor(a[10])", "tensor(a[10])", "tensor(a[10])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])"); 
checkMerge("tensor<bfloat16>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMerge("tensor<bfloat16>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])"); checkMerge("tensor<float>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(y{})", "tensor<float>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<int8>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor()", "tensor()", "tensor()"); checkMerge("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])"); checkMerge("tensor(x[5])", "tensor(x[5])", "tensor(x[5])"); checkMerge("tensor(x{})", "tensor(x{})", "tensor(x{})"); checkMerge("tensor(x{},y{})", "tensor<bfloat16>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(x{},y{})", "tensor<float>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(x{},y{})", "tensor<int8>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(y{})", "tensor(y{})", "tensor(y{})"); checkMerge("tensor(x{})", "tensor(x[5])", "tensor(x{})"); checkMergeFails("tensor(a[10])", "tensor()"); checkMergeFails("tensor(a[10])", "tensor(x{},y{},z{})"); checkMergeFails("tensor<bfloat16>(x[5])", "tensor()"); checkMergeFails("tensor<bfloat16>(x{})", "tensor()"); checkMergeFails("tensor<float>(x[5])", "tensor()"); checkMergeFails("tensor<float>(x{})", "tensor()"); checkMergeFails("tensor<int8>(x{})", "tensor()"); checkMergeFails("tensor()", "tensor<int8>(x[5])"); checkMergeFails("tensor()", "tensor(x{})"); checkMergeFails("tensor(x[3])", "tensor(x[5])"); checkMergeFails("tensor(x[5])", "tensor(x[3])"); checkMergeFails("tensor(x{})", "tensor()"); checkMergeFails("tensor(x{},y{})", 
"tensor(x{},z{})"); checkMergeFails("tensor(y{})", "tensor()"); } @Test public void verifyRename() { checkRename("tensor(x[10],y[20],z[30])", mkl("y"), mkl("a"), "tensor(a[20],x[10],z[30])"); checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})"); checkRename("tensor(x{},y[5])", mkl("x","y"), mkl("y","x"), "tensor(x[5],y{})"); checkRename("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), mkl("c", "a", "b"), "tensor(a[20],b[30],c[10])"); checkRename("tensor(x{})", mkl("x"), mkl("x"), "tensor(x{})"); checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})"); checkRename("tensor<float>(x{})", mkl("x"), mkl("y"), "tensor<float>(y{})"); checkRename("tensor<bfloat16>(x{})", mkl("x"), mkl("y"), "tensor<bfloat16>(y{})"); checkRename("tensor<int8>(x{})", mkl("x"), mkl("y"), "tensor<int8>(y{})"); checkRenameFails("tensor(x{})", mkl(), mkl()); checkRenameFails("tensor()", mkl(), mkl()); checkRenameFails("tensor(x{},y{})", mkl("x"), mkl("y","z")); checkRenameFails("tensor(x{},y{})", mkl("x","y"), mkl("z")); checkRenameFails("tensor()", mkl("a"), mkl("b")); checkRenameFails("tensor(x[10],y[20],z[30])", mkl("y","z"), mkl("a", "x")); } @Test public void verifyConcat() { checkConcat("tensor(y[7])", "tensor(x{})", "z", "tensor(x{},y[7],z[2])"); checkConcat("tensor()", "tensor()", "x", "tensor(x[2])"); checkConcat("tensor(x[2])", "tensor()", "x", "tensor(x[3])"); checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[2])", "tensor()", "y", "tensor(x[2],y[2])"); checkConcat("tensor(x[2])", "tensor(x[2])", "y", "tensor(x[2],y[2])"); checkConcat("tensor(x[2],y[2])", "tensor(x[3])", "x", "tensor(x[5],y[2])"); checkConcat("tensor(x[2],y[2])", "tensor(y[7])", "y", "tensor(x[2],y[9])"); checkConcat("tensor(x[5])", "tensor(y[7])", "z", "tensor(x[5],y[7],z[2])"); checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", "tensor<float>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", 
"tensor<bfloat16>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", "tensor<int8>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<float>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<bfloat16>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<bfloat16>(x[5])"); checkConcat("tensor<bfloat16>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<int8>(x[3])", "tensor<int8>(x[2])", "x", "tensor<int8>(x[5])"); checkConcat("tensor(x[3])", "tensor()", "x", "tensor(x[4])"); checkConcat("tensor<float>(x[3])", "tensor()", "x", "tensor<float>(x[4])"); checkConcat("tensor<bfloat16>(x[3])", "tensor()", "x", "tensor<bfloat16>(x[4])"); checkConcat("tensor<int8>(x[3])", "tensor()", "x", "tensor<int8>(x[4])"); checkConcatFails("tensor(x{})", "tensor(x[2])", "x"); checkConcatFails("tensor(x{})", "tensor(x{})", "x"); checkConcatFails("tensor(x{})", "tensor()", "x"); checkConcatFails("tensor(x[3])", "tensor(x[2])", "y"); } @Test public void verifyPeek() { checkPeek("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<bfloat16>(x[10],y[20])"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<int8>(x[10],y[20])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<bfloat16>(y[20])"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<int8>(y[20])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), 
"tensor()"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])"); checkPeek("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<bfloat16>(x[10],y{})"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<int8>(x[10],y{})"); checkPeek("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<bfloat16>(y{})"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<int8>(y{})"); checkPeek("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkFullPeek("tensor(x[10],y[20],z[30])"); checkFullPeek("tensor<float>(x[10],y[20],z[30])"); checkFullPeek("tensor<bfloat16>(x[10],y[20],z[30])"); checkFullPeek("tensor<int8>(x[10],y[20],z[30])"); checkFullPeek("tensor(x[10],y{},z[30])"); checkFullPeek("tensor<float>(x[10],y{},z[30])"); checkFullPeek("tensor<bfloat16>(x[10],y{},z[30])"); checkFullPeek("tensor<int8>(x[10],y{},z[30])"); checkPeekFails("tensor()", mkl()); checkPeekFails("tensor()", mkl("x")); checkPeekFails("tensor(y{})", mkl("x")); checkPeekFails("tensor(y{})", mkl("y", "y")); checkPeekFails("tensor<float>(y[10])", mkl("x")); } @Test public void verifyCellCast() { checkCast("tensor(x[10],y[20],z[30])", TensorType.Value.FLOAT, 
"tensor<float>(x[10],y[20],z[30])"); checkCasts("tensor<double>(x[10])"); checkCasts("tensor<float>(x[10])"); checkCasts("tensor<bfloat16>(x[10])"); checkCasts("tensor<int8>(x[10])"); checkCasts("tensor<double>(x{})"); checkCasts("tensor<float>(x{})"); checkCasts("tensor<bfloat16>(x{})"); checkCasts("tensor<int8>(x{})"); checkCasts("tensor<double>(x{},y[5])"); checkCasts("tensor<float>(x{},y[5])"); checkCasts("tensor<bfloat16>(x{},y[5])"); checkCasts("tensor<int8>(x{},y[5])"); checkCast("tensor()", TensorType.Value.DOUBLE, "tensor()"); checkCastFails("tensor()", TensorType.Value.FLOAT); checkCastFails("tensor()", TensorType.Value.BFLOAT16); checkCastFails("tensor()", TensorType.Value.INT8); } private static void checkMap(String specA, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.map(a); assertEquals(expected, result.toString()); } private static void checkJoin(String specA, String specB, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.join(a, b); assertEquals(expected, result.toString()); } private static void checkJoinFails(String specA, String specB) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.join(a, b); System.err.println("join of "+a+" and "+b+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkReduce(String specA, List<String> dims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.reduce(a, dims); assertEquals(expected, result.toString()); } private static void checkFullReduce(String specA) { String expected = "tensor()"; List<String> dims = new ArrayList<>(); checkReduce(specA, dims, expected); var a = TensorType.fromSpec(specA); for (var dim : a.dimensions()) { dims.add(dim.name()); } checkReduce(specA, dims, expected); } private static void checkReduceFails(String 
specA, String dim) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.reduce(a, mkl(dim)); System.err.println("Reduce "+specA+" with dim "+dim+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkMerge(String specA, String specB, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.merge(a, b); assertEquals(expected, result.toString()); } private static void checkMergeFails(String specA, String specB) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.merge(a, b); System.err.println("merge of "+a+" and "+b+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkRename(String specA, List<String> fromDims, List<String> toDims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.rename(a, fromDims, toDims); assertEquals(expected, result.toString()); } private static void checkRenameFails(String specA, List<String> fromDims, List<String> toDims) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.rename(a, fromDims, toDims); System.err.println("rename "+a+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkConcat(String specA, String specB, String dim, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.concat(a, b, dim); assertEquals(expected, result.toString()); } private static void checkConcatFails(String specA, String specB, String dim) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.concat(a, b, dim); System.err.println("concat "+a+" and "+b+" along 
"+dim+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkPeek(String specA, List<String> dims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.peek(a, dims); assertEquals(expected, result.toString()); } private static void checkFullPeek(String specA) { String expected = "tensor()"; List<String> dims = new ArrayList<>(); var a = TensorType.fromSpec(specA); for (var dim : a.dimensions()) { dims.add(dim.name()); } checkPeek(specA, dims, expected); } private static void checkPeekFails(String specA, List<String> dims) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.peek(a, dims); System.err.println("Peek "+specA+" with dims "+dims+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkCast(String specA, TensorType.Value newValueType, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.cell_cast(a, newValueType); assertEquals(expected, result.toString()); } private static void checkCasts(String specA) { var a = TensorType.fromSpec(specA); for (var newValueType : TensorType.Value.values()) { var result = TypeResolver.cell_cast(a, newValueType); assertEquals(result.valueType(), newValueType); assertEquals(result.dimensions(), a.dimensions()); } } private static void checkCastFails(String specA, TensorType.Value newValueType) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.cell_cast(a, newValueType); System.err.println("cast of "+a+" to "+newValueType+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } }
class TypeResolverTestCase { private static List<String> mkl(String ...values) { return Arrays.asList(values); } @Test public void verifyMap() { checkMap("tensor()", "tensor()"); checkMap("tensor(x[10])", "tensor(x[10])"); checkMap("tensor(a[10],b[20],c[30])", "tensor(a[10],b[20],c[30])"); checkMap("tensor(y{})", "tensor(y{})"); checkMap("tensor(x[10],y{})", "tensor(x[10],y{})"); checkMap("tensor<float>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<float>(y{})", "tensor<float>(y{})"); checkMap("tensor<bfloat16>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMap("tensor<int8>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<int8>(y{})", "tensor<float>(y{})"); } @Test @Test public void verifyReduce() { checkFullReduce("tensor()"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkFullReduce("tensor(x[10],y[20],z[30])"); checkFullReduce("tensor<float>(x[10],y[20],z[30])"); checkFullReduce("tensor<bfloat16>(x[10],y[20],z[30])"); checkFullReduce("tensor<int8>(x[10],y[20],z[30])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); 
checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])"); checkReduce("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkFullReduce("tensor(x[10],y{},z[30])"); checkFullReduce("tensor<float>(x[10],y{},z[30])"); checkFullReduce("tensor<bfloat16>(x[10],y{},z[30])"); checkFullReduce("tensor<int8>(x[10],y{},z[30])"); checkReduce("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor()", mkl("x"), "tensor()"); checkReduce("tensor(y{})", mkl("x"), "tensor(y{})"); checkReduce("tensor<float>(y[10])", mkl("x"), "tensor<float>(y[10])"); checkReduce("tensor<int8>(y[10])", mkl("x"), "tensor<float>(y[10])"); } @Test public void verifyMerge() { checkMerge("tensor(a[10])", "tensor(a[10])", "tensor(a[10])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])"); 
checkMerge("tensor<bfloat16>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMerge("tensor<bfloat16>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])"); checkMerge("tensor<float>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(y{})", "tensor<float>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<int8>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor()", "tensor()", "tensor()"); checkMerge("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])"); checkMerge("tensor(x[5])", "tensor(x[5])", "tensor(x[5])"); checkMerge("tensor(x{})", "tensor(x{})", "tensor(x{})"); checkMerge("tensor(x{},y{})", "tensor<bfloat16>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(x{},y{})", "tensor<float>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(x{},y{})", "tensor<int8>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(y{})", "tensor(y{})", "tensor(y{})"); checkMerge("tensor(x{})", "tensor(x[5])", "tensor(x{})"); checkMergeFails("tensor(a[10])", "tensor()"); checkMergeFails("tensor(a[10])", "tensor(x{},y{},z{})"); checkMergeFails("tensor<bfloat16>(x[5])", "tensor()"); checkMergeFails("tensor<bfloat16>(x{})", "tensor()"); checkMergeFails("tensor<float>(x[5])", "tensor()"); checkMergeFails("tensor<float>(x{})", "tensor()"); checkMergeFails("tensor<int8>(x{})", "tensor()"); checkMergeFails("tensor()", "tensor<int8>(x[5])"); checkMergeFails("tensor()", "tensor(x{})"); checkMergeFails("tensor(x[3])", "tensor(x[5])"); checkMergeFails("tensor(x[5])", "tensor(x[3])"); checkMergeFails("tensor(x{})", "tensor()"); checkMergeFails("tensor(x{},y{})", 
"tensor(x{},z{})"); checkMergeFails("tensor(y{})", "tensor()"); } @Test public void verifyRename() { checkRename("tensor(x[10],y[20],z[30])", mkl("y"), mkl("a"), "tensor(a[20],x[10],z[30])"); checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})"); checkRename("tensor(x{},y[5])", mkl("x","y"), mkl("y","x"), "tensor(x[5],y{})"); checkRename("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), mkl("c", "a", "b"), "tensor(a[20],b[30],c[10])"); checkRename("tensor(x{})", mkl("x"), mkl("x"), "tensor(x{})"); checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})"); checkRename("tensor<float>(x{})", mkl("x"), mkl("y"), "tensor<float>(y{})"); checkRename("tensor<bfloat16>(x{})", mkl("x"), mkl("y"), "tensor<bfloat16>(y{})"); checkRename("tensor<int8>(x{})", mkl("x"), mkl("y"), "tensor<int8>(y{})"); checkRenameFails("tensor(x{})", mkl(), mkl()); checkRenameFails("tensor()", mkl(), mkl()); checkRenameFails("tensor(x{},y{})", mkl("x"), mkl("y","z")); checkRenameFails("tensor(x{},y{})", mkl("x","y"), mkl("z")); checkRenameFails("tensor()", mkl("a"), mkl("b")); checkRenameFails("tensor(x[10],y[20],z[30])", mkl("y","z"), mkl("a", "x")); } @Test public void verifyConcat() { checkConcat("tensor(y[7])", "tensor(x{})", "z", "tensor(x{},y[7],z[2])"); checkConcat("tensor()", "tensor()", "x", "tensor(x[2])"); checkConcat("tensor(x[2])", "tensor()", "x", "tensor(x[3])"); checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[2])", "tensor()", "y", "tensor(x[2],y[2])"); checkConcat("tensor(x[2])", "tensor(x[2])", "y", "tensor(x[2],y[2])"); checkConcat("tensor(x[2],y[2])", "tensor(x[3])", "x", "tensor(x[5],y[2])"); checkConcat("tensor(x[2],y[2])", "tensor(y[7])", "y", "tensor(x[2],y[9])"); checkConcat("tensor(x[5])", "tensor(y[7])", "z", "tensor(x[5],y[7],z[2])"); checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", "tensor<float>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", 
"tensor<bfloat16>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", "tensor<int8>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<float>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<bfloat16>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<bfloat16>(x[5])"); checkConcat("tensor<bfloat16>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<int8>(x[3])", "tensor<int8>(x[2])", "x", "tensor<int8>(x[5])"); checkConcat("tensor(x[3])", "tensor()", "x", "tensor(x[4])"); checkConcat("tensor<float>(x[3])", "tensor()", "x", "tensor<float>(x[4])"); checkConcat("tensor<bfloat16>(x[3])", "tensor()", "x", "tensor<bfloat16>(x[4])"); checkConcat("tensor<int8>(x[3])", "tensor()", "x", "tensor<int8>(x[4])"); checkConcatFails("tensor(x{})", "tensor(x[2])", "x"); checkConcatFails("tensor(x{})", "tensor(x{})", "x"); checkConcatFails("tensor(x{})", "tensor()", "x"); checkConcatFails("tensor(x[3])", "tensor(x[2])", "y"); } @Test public void verifyPeek() { checkPeek("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<bfloat16>(x[10],y[20])"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<int8>(x[10],y[20])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<bfloat16>(y[20])"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<int8>(y[20])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), 
"tensor()"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])"); checkPeek("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<bfloat16>(x[10],y{})"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<int8>(x[10],y{})"); checkPeek("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<bfloat16>(y{})"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<int8>(y{})"); checkPeek("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkFullPeek("tensor(x[10],y[20],z[30])"); checkFullPeek("tensor<float>(x[10],y[20],z[30])"); checkFullPeek("tensor<bfloat16>(x[10],y[20],z[30])"); checkFullPeek("tensor<int8>(x[10],y[20],z[30])"); checkFullPeek("tensor(x[10],y{},z[30])"); checkFullPeek("tensor<float>(x[10],y{},z[30])"); checkFullPeek("tensor<bfloat16>(x[10],y{},z[30])"); checkFullPeek("tensor<int8>(x[10],y{},z[30])"); checkPeekFails("tensor()", mkl()); checkPeekFails("tensor()", mkl("x")); checkPeekFails("tensor(y{})", mkl("x")); checkPeekFails("tensor(y{})", mkl("y", "y")); checkPeekFails("tensor<float>(y[10])", mkl("x")); } @Test public void verifyCellCast() { checkCast("tensor(x[10],y[20],z[30])", TensorType.Value.FLOAT, 
"tensor<float>(x[10],y[20],z[30])"); checkCasts("tensor<double>(x[10])"); checkCasts("tensor<float>(x[10])"); checkCasts("tensor<bfloat16>(x[10])"); checkCasts("tensor<int8>(x[10])"); checkCasts("tensor<double>(x{})"); checkCasts("tensor<float>(x{})"); checkCasts("tensor<bfloat16>(x{})"); checkCasts("tensor<int8>(x{})"); checkCasts("tensor<double>(x{},y[5])"); checkCasts("tensor<float>(x{},y[5])"); checkCasts("tensor<bfloat16>(x{},y[5])"); checkCasts("tensor<int8>(x{},y[5])"); checkCast("tensor()", TensorType.Value.DOUBLE, "tensor()"); checkCastFails("tensor()", TensorType.Value.FLOAT); checkCastFails("tensor()", TensorType.Value.BFLOAT16); checkCastFails("tensor()", TensorType.Value.INT8); } private static void checkMap(String specA, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.map(a); assertEquals(expected, result.toString()); } private static void checkJoin(String specA, String specB, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.join(a, b); assertEquals(expected, result.toString()); } private static void checkJoinFails(String specA, String specB) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.join(a, b); System.err.println("join of "+a+" and "+b+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkReduce(String specA, List<String> dims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.reduce(a, dims); assertEquals(expected, result.toString()); } private static void checkFullReduce(String specA) { String expected = "tensor()"; List<String> dims = new ArrayList<>(); checkReduce(specA, dims, expected); var a = TensorType.fromSpec(specA); for (var dim : a.dimensions()) { dims.add(dim.name()); } checkReduce(specA, dims, expected); } private static void checkReduceFails(String 
specA, String dim) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.reduce(a, mkl(dim)); System.err.println("Reduce "+specA+" with dim "+dim+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkMerge(String specA, String specB, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.merge(a, b); assertEquals(expected, result.toString()); } private static void checkMergeFails(String specA, String specB) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.merge(a, b); System.err.println("merge of "+a+" and "+b+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkRename(String specA, List<String> fromDims, List<String> toDims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.rename(a, fromDims, toDims); assertEquals(expected, result.toString()); } private static void checkRenameFails(String specA, List<String> fromDims, List<String> toDims) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.rename(a, fromDims, toDims); System.err.println("rename "+a+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkConcat(String specA, String specB, String dim, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.concat(a, b, dim); assertEquals(expected, result.toString()); } private static void checkConcatFails(String specA, String specB, String dim) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.concat(a, b, dim); System.err.println("concat "+a+" and "+b+" along 
"+dim+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkPeek(String specA, List<String> dims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.peek(a, dims); assertEquals(expected, result.toString()); } private static void checkFullPeek(String specA) { String expected = "tensor()"; List<String> dims = new ArrayList<>(); var a = TensorType.fromSpec(specA); for (var dim : a.dimensions()) { dims.add(dim.name()); } checkPeek(specA, dims, expected); } private static void checkPeekFails(String specA, List<String> dims) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.peek(a, dims); System.err.println("Peek "+specA+" with dims "+dims+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkCast(String specA, TensorType.Value newValueType, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.cell_cast(a, newValueType); assertEquals(expected, result.toString()); } private static void checkCasts(String specA) { var a = TensorType.fromSpec(specA); for (var newValueType : TensorType.Value.values()) { var result = TypeResolver.cell_cast(a, newValueType); assertEquals(result.valueType(), newValueType); assertEquals(result.dimensions(), a.dimensions()); } } private static void checkCastFails(String specA, TensorType.Value newValueType) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.cell_cast(a, newValueType); System.err.println("cast of "+a+" to "+newValueType+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } }
// Thanks!  (stray non-code line — commented out so the file can parse)
public void verifyJoin() { checkJoin("tensor()", "tensor()", "tensor()"); checkJoin("tensor()", "tensor(x{})", "tensor(x{})"); checkJoin("tensor(x{})", "tensor()", "tensor(x{})"); checkJoin("tensor(x{})", "tensor(x{})", "tensor(x{})"); checkJoin("tensor(x{})", "tensor(y{})", "tensor(x{},y{})"); checkJoin("tensor(x{},y{})", "tensor(y{},z{})", "tensor(x{},y{},z{})"); checkJoin("tensor(y{})", "tensor()", "tensor(y{})"); checkJoin("tensor(y{})", "tensor(y{})", "tensor(y{})"); checkJoin("tensor(a[10])", "tensor(a[10])", "tensor(a[10])"); checkJoin("tensor(a[10])", "tensor()", "tensor(a[10])"); checkJoin("tensor(a[10])", "tensor(x{},y{},z{})", "tensor(a[10],x{},y{},z{})"); checkJoin("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<bfloat16>(x[5])", "tensor()", "tensor<float>(x[5])"); checkJoin("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])"); checkJoin("tensor<bfloat16>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<bfloat16>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<bfloat16>(x{})", "tensor()", "tensor<float>(x{})"); checkJoin("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<float>(x[5])", "tensor()", "tensor<float>(x[5])"); checkJoin("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])"); checkJoin("tensor<float>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<float>(x{})", "tensor<float>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<float>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<float>(x{})", "tensor()", "tensor<float>(x{})"); checkJoin("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); 
checkJoin("tensor<int8>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<int8>(x{})", "tensor()", "tensor<float>(x{})"); checkJoin("tensor()", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])"); checkJoin("tensor(x[5])", "tensor(x[5])", "tensor(x[5])"); checkJoin("tensor(x{})", "tensor<bfloat16>(y{})", "tensor(x{},y{})"); checkJoin("tensor(x{})", "tensor<float>(y{})", "tensor(x{},y{})"); checkJoin("tensor(x{})", "tensor<int8>(y{})", "tensor(x{},y{})"); checkJoin("tensor(x[])", "tensor(x{})", "tensor(x{})"); checkJoin("tensor(x{})", "tensor(x[])", "tensor(x{})"); checkJoinFails("tensor(x[3])", "tensor(x[5])"); checkJoinFails("tensor(x[5])", "tensor(x[3])"); }
checkJoin("tensor(x[])", "tensor(x{})", "tensor(x{})");
public void verifyJoin() { checkJoin("tensor()", "tensor()", "tensor()"); checkJoin("tensor()", "tensor(x{})", "tensor(x{})"); checkJoin("tensor(x{})", "tensor()", "tensor(x{})"); checkJoin("tensor(x{})", "tensor(x{})", "tensor(x{})"); checkJoin("tensor(x{})", "tensor(y{})", "tensor(x{},y{})"); checkJoin("tensor(x{},y{})", "tensor(y{},z{})", "tensor(x{},y{},z{})"); checkJoin("tensor(y{})", "tensor()", "tensor(y{})"); checkJoin("tensor(y{})", "tensor(y{})", "tensor(y{})"); checkJoin("tensor(a[10])", "tensor(a[10])", "tensor(a[10])"); checkJoin("tensor(a[10])", "tensor()", "tensor(a[10])"); checkJoin("tensor(a[10])", "tensor(x{},y{},z{})", "tensor(a[10],x{},y{},z{})"); checkJoin("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<bfloat16>(x[5])", "tensor()", "tensor<float>(x[5])"); checkJoin("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])"); checkJoin("tensor<bfloat16>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<bfloat16>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<bfloat16>(x{})", "tensor()", "tensor<float>(x{})"); checkJoin("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor<float>(x[5])", "tensor()", "tensor<float>(x[5])"); checkJoin("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])"); checkJoin("tensor<float>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<float>(x{})", "tensor<float>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<float>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<float>(x{})", "tensor()", "tensor<float>(x{})"); checkJoin("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); 
checkJoin("tensor<int8>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})"); checkJoin("tensor<int8>(x{})", "tensor()", "tensor<float>(x{})"); checkJoin("tensor()", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkJoin("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])"); checkJoin("tensor(x[5])", "tensor(x[5])", "tensor(x[5])"); checkJoin("tensor(x{})", "tensor<bfloat16>(y{})", "tensor(x{},y{})"); checkJoin("tensor(x{})", "tensor<float>(y{})", "tensor(x{},y{})"); checkJoin("tensor(x{})", "tensor<int8>(y{})", "tensor(x{},y{})"); checkJoin("tensor(x[])", "tensor(x{})", "tensor(x{})"); checkJoin("tensor(x[3])", "tensor(x{})", "tensor(x{})"); checkJoin("tensor(x{})", "tensor(x[])", "tensor(x{})"); checkJoin("tensor(x{})", "tensor(x[3])", "tensor(x{})"); checkJoinFails("tensor(x[3])", "tensor(x[5])"); checkJoinFails("tensor(x[5])", "tensor(x[3])"); }
class TypeResolverTestCase { private static List<String> mkl(String ...values) { return Arrays.asList(values); } @Test public void verifyMap() { checkMap("tensor()", "tensor()"); checkMap("tensor(x[10])", "tensor(x[10])"); checkMap("tensor(a[10],b[20],c[30])", "tensor(a[10],b[20],c[30])"); checkMap("tensor(y{})", "tensor(y{})"); checkMap("tensor(x[10],y{})", "tensor(x[10],y{})"); checkMap("tensor<float>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<float>(y{})", "tensor<float>(y{})"); checkMap("tensor<bfloat16>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMap("tensor<int8>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<int8>(y{})", "tensor<float>(y{})"); } @Test @Test public void verifyReduce() { checkFullReduce("tensor()"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkFullReduce("tensor(x[10],y[20],z[30])"); checkFullReduce("tensor<float>(x[10],y[20],z[30])"); checkFullReduce("tensor<bfloat16>(x[10],y[20],z[30])"); checkFullReduce("tensor<int8>(x[10],y[20],z[30])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); 
checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])"); checkReduce("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkFullReduce("tensor(x[10],y{},z[30])"); checkFullReduce("tensor<float>(x[10],y{},z[30])"); checkFullReduce("tensor<bfloat16>(x[10],y{},z[30])"); checkFullReduce("tensor<int8>(x[10],y{},z[30])"); checkReduce("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor()", mkl("x"), "tensor()"); checkReduce("tensor(y{})", mkl("x"), "tensor(y{})"); checkReduce("tensor<float>(y[10])", mkl("x"), "tensor<float>(y[10])"); checkReduce("tensor<int8>(y[10])", mkl("x"), "tensor<float>(y[10])"); } @Test public void verifyMerge() { checkMerge("tensor(a[10])", "tensor(a[10])", "tensor(a[10])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])"); 
checkMerge("tensor<bfloat16>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMerge("tensor<bfloat16>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])"); checkMerge("tensor<float>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(y{})", "tensor<float>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<int8>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor()", "tensor()", "tensor()"); checkMerge("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])"); checkMerge("tensor(x[5])", "tensor(x[5])", "tensor(x[5])"); checkMerge("tensor(x{})", "tensor(x{})", "tensor(x{})"); checkMerge("tensor(x{},y{})", "tensor<bfloat16>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(x{},y{})", "tensor<float>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(x{},y{})", "tensor<int8>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(y{})", "tensor(y{})", "tensor(y{})"); checkMerge("tensor(x{})", "tensor(x[5])", "tensor(x{})"); checkMergeFails("tensor(a[10])", "tensor()"); checkMergeFails("tensor(a[10])", "tensor(x{},y{},z{})"); checkMergeFails("tensor<bfloat16>(x[5])", "tensor()"); checkMergeFails("tensor<bfloat16>(x{})", "tensor()"); checkMergeFails("tensor<float>(x[5])", "tensor()"); checkMergeFails("tensor<float>(x{})", "tensor()"); checkMergeFails("tensor<int8>(x{})", "tensor()"); checkMergeFails("tensor()", "tensor<int8>(x[5])"); checkMergeFails("tensor()", "tensor(x{})"); checkMergeFails("tensor(x[3])", "tensor(x[5])"); checkMergeFails("tensor(x[5])", "tensor(x[3])"); checkMergeFails("tensor(x{})", "tensor()"); checkMergeFails("tensor(x{},y{})", 
"tensor(x{},z{})"); checkMergeFails("tensor(y{})", "tensor()"); } @Test public void verifyRename() { checkRename("tensor(x[10],y[20],z[30])", mkl("y"), mkl("a"), "tensor(a[20],x[10],z[30])"); checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})"); checkRename("tensor(x{},y[5])", mkl("x","y"), mkl("y","x"), "tensor(x[5],y{})"); checkRename("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), mkl("c", "a", "b"), "tensor(a[20],b[30],c[10])"); checkRename("tensor(x{})", mkl("x"), mkl("x"), "tensor(x{})"); checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})"); checkRename("tensor<float>(x{})", mkl("x"), mkl("y"), "tensor<float>(y{})"); checkRename("tensor<bfloat16>(x{})", mkl("x"), mkl("y"), "tensor<bfloat16>(y{})"); checkRename("tensor<int8>(x{})", mkl("x"), mkl("y"), "tensor<int8>(y{})"); checkRenameFails("tensor(x{})", mkl(), mkl()); checkRenameFails("tensor()", mkl(), mkl()); checkRenameFails("tensor(x{},y{})", mkl("x"), mkl("y","z")); checkRenameFails("tensor(x{},y{})", mkl("x","y"), mkl("z")); checkRenameFails("tensor()", mkl("a"), mkl("b")); checkRenameFails("tensor(x[10],y[20],z[30])", mkl("y","z"), mkl("a", "x")); } @Test public void verifyConcat() { checkConcat("tensor(y[7])", "tensor(x{})", "z", "tensor(x{},y[7],z[2])"); checkConcat("tensor()", "tensor()", "x", "tensor(x[2])"); checkConcat("tensor(x[2])", "tensor()", "x", "tensor(x[3])"); checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[2])", "tensor()", "y", "tensor(x[2],y[2])"); checkConcat("tensor(x[2])", "tensor(x[2])", "y", "tensor(x[2],y[2])"); checkConcat("tensor(x[2],y[2])", "tensor(x[3])", "x", "tensor(x[5],y[2])"); checkConcat("tensor(x[2],y[2])", "tensor(y[7])", "y", "tensor(x[2],y[9])"); checkConcat("tensor(x[5])", "tensor(y[7])", "z", "tensor(x[5],y[7],z[2])"); checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", "tensor<float>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", 
"tensor<bfloat16>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", "tensor<int8>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<float>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<bfloat16>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<bfloat16>(x[5])"); checkConcat("tensor<bfloat16>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<int8>(x[3])", "tensor<int8>(x[2])", "x", "tensor<int8>(x[5])"); checkConcat("tensor(x[3])", "tensor()", "x", "tensor(x[4])"); checkConcat("tensor<float>(x[3])", "tensor()", "x", "tensor<float>(x[4])"); checkConcat("tensor<bfloat16>(x[3])", "tensor()", "x", "tensor<bfloat16>(x[4])"); checkConcat("tensor<int8>(x[3])", "tensor()", "x", "tensor<int8>(x[4])"); checkConcatFails("tensor(x{})", "tensor(x[2])", "x"); checkConcatFails("tensor(x{})", "tensor(x{})", "x"); checkConcatFails("tensor(x{})", "tensor()", "x"); checkConcatFails("tensor(x[3])", "tensor(x[2])", "y"); } @Test public void verifyPeek() { checkPeek("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<bfloat16>(x[10],y[20])"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<int8>(x[10],y[20])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<bfloat16>(y[20])"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<int8>(y[20])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), 
"tensor()"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])"); checkPeek("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<bfloat16>(x[10],y{})"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<int8>(x[10],y{})"); checkPeek("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<bfloat16>(y{})"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<int8>(y{})"); checkPeek("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkFullPeek("tensor(x[10],y[20],z[30])"); checkFullPeek("tensor<float>(x[10],y[20],z[30])"); checkFullPeek("tensor<bfloat16>(x[10],y[20],z[30])"); checkFullPeek("tensor<int8>(x[10],y[20],z[30])"); checkFullPeek("tensor(x[10],y{},z[30])"); checkFullPeek("tensor<float>(x[10],y{},z[30])"); checkFullPeek("tensor<bfloat16>(x[10],y{},z[30])"); checkFullPeek("tensor<int8>(x[10],y{},z[30])"); checkPeekFails("tensor()", mkl()); checkPeekFails("tensor()", mkl("x")); checkPeekFails("tensor(y{})", mkl("x")); checkPeekFails("tensor(y{})", mkl("y", "y")); checkPeekFails("tensor<float>(y[10])", mkl("x")); } @Test public void verifyCellCast() { checkCast("tensor(x[10],y[20],z[30])", TensorType.Value.FLOAT, 
"tensor<float>(x[10],y[20],z[30])"); checkCasts("tensor<double>(x[10])"); checkCasts("tensor<float>(x[10])"); checkCasts("tensor<bfloat16>(x[10])"); checkCasts("tensor<int8>(x[10])"); checkCasts("tensor<double>(x{})"); checkCasts("tensor<float>(x{})"); checkCasts("tensor<bfloat16>(x{})"); checkCasts("tensor<int8>(x{})"); checkCasts("tensor<double>(x{},y[5])"); checkCasts("tensor<float>(x{},y[5])"); checkCasts("tensor<bfloat16>(x{},y[5])"); checkCasts("tensor<int8>(x{},y[5])"); checkCast("tensor()", TensorType.Value.DOUBLE, "tensor()"); checkCastFails("tensor()", TensorType.Value.FLOAT); checkCastFails("tensor()", TensorType.Value.BFLOAT16); checkCastFails("tensor()", TensorType.Value.INT8); } private static void checkMap(String specA, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.map(a); assertEquals(expected, result.toString()); } private static void checkJoin(String specA, String specB, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.join(a, b); assertEquals(expected, result.toString()); } private static void checkJoinFails(String specA, String specB) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.join(a, b); System.err.println("join of "+a+" and "+b+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkReduce(String specA, List<String> dims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.reduce(a, dims); assertEquals(expected, result.toString()); } private static void checkFullReduce(String specA) { String expected = "tensor()"; List<String> dims = new ArrayList<>(); checkReduce(specA, dims, expected); var a = TensorType.fromSpec(specA); for (var dim : a.dimensions()) { dims.add(dim.name()); } checkReduce(specA, dims, expected); } private static void checkReduceFails(String 
specA, String dim) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.reduce(a, mkl(dim)); System.err.println("Reduce "+specA+" with dim "+dim+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkMerge(String specA, String specB, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.merge(a, b); assertEquals(expected, result.toString()); } private static void checkMergeFails(String specA, String specB) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.merge(a, b); System.err.println("merge of "+a+" and "+b+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkRename(String specA, List<String> fromDims, List<String> toDims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.rename(a, fromDims, toDims); assertEquals(expected, result.toString()); } private static void checkRenameFails(String specA, List<String> fromDims, List<String> toDims) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.rename(a, fromDims, toDims); System.err.println("rename "+a+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkConcat(String specA, String specB, String dim, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.concat(a, b, dim); assertEquals(expected, result.toString()); } private static void checkConcatFails(String specA, String specB, String dim) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.concat(a, b, dim); System.err.println("concat "+a+" and "+b+" along 
"+dim+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkPeek(String specA, List<String> dims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.peek(a, dims); assertEquals(expected, result.toString()); } private static void checkFullPeek(String specA) { String expected = "tensor()"; List<String> dims = new ArrayList<>(); var a = TensorType.fromSpec(specA); for (var dim : a.dimensions()) { dims.add(dim.name()); } checkPeek(specA, dims, expected); } private static void checkPeekFails(String specA, List<String> dims) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.peek(a, dims); System.err.println("Peek "+specA+" with dims "+dims+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkCast(String specA, TensorType.Value newValueType, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.cell_cast(a, newValueType); assertEquals(expected, result.toString()); } private static void checkCasts(String specA) { var a = TensorType.fromSpec(specA); for (var newValueType : TensorType.Value.values()) { var result = TypeResolver.cell_cast(a, newValueType); assertEquals(result.valueType(), newValueType); assertEquals(result.dimensions(), a.dimensions()); } } private static void checkCastFails(String specA, TensorType.Value newValueType) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.cell_cast(a, newValueType); System.err.println("cast of "+a+" to "+newValueType+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } }
class TypeResolverTestCase { private static List<String> mkl(String ...values) { return Arrays.asList(values); } @Test public void verifyMap() { checkMap("tensor()", "tensor()"); checkMap("tensor(x[10])", "tensor(x[10])"); checkMap("tensor(a[10],b[20],c[30])", "tensor(a[10],b[20],c[30])"); checkMap("tensor(y{})", "tensor(y{})"); checkMap("tensor(x[10],y{})", "tensor(x[10],y{})"); checkMap("tensor<float>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<float>(y{})", "tensor<float>(y{})"); checkMap("tensor<bfloat16>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMap("tensor<int8>(x[10])", "tensor<float>(x[10])"); checkMap("tensor<int8>(y{})", "tensor<float>(y{})"); } @Test @Test public void verifyReduce() { checkFullReduce("tensor()"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkFullReduce("tensor(x[10],y[20],z[30])"); checkFullReduce("tensor<float>(x[10],y[20],z[30])"); checkFullReduce("tensor<bfloat16>(x[10],y[20],z[30])"); checkFullReduce("tensor<int8>(x[10],y[20],z[30])"); checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); 
checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])"); checkReduce("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkReduce("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkFullReduce("tensor(x[10],y{},z[30])"); checkFullReduce("tensor<float>(x[10],y{},z[30])"); checkFullReduce("tensor<bfloat16>(x[10],y{},z[30])"); checkFullReduce("tensor<int8>(x[10],y{},z[30])"); checkReduce("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkReduce("tensor()", mkl("x"), "tensor()"); checkReduce("tensor(y{})", mkl("x"), "tensor(y{})"); checkReduce("tensor<float>(y[10])", mkl("x"), "tensor<float>(y[10])"); checkReduce("tensor<int8>(y[10])", mkl("x"), "tensor<float>(y[10])"); } @Test public void verifyMerge() { checkMerge("tensor(a[10])", "tensor(a[10])", "tensor(a[10])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])"); 
checkMerge("tensor<bfloat16>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMerge("tensor<bfloat16>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])"); checkMerge("tensor<float>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(y{})", "tensor<float>(y{})", "tensor<float>(y{})"); checkMerge("tensor<float>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])"); checkMerge("tensor<int8>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})"); checkMerge("tensor()", "tensor()", "tensor()"); checkMerge("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])"); checkMerge("tensor(x[5])", "tensor(x[5])", "tensor(x[5])"); checkMerge("tensor(x{})", "tensor(x{})", "tensor(x{})"); checkMerge("tensor(x{},y{})", "tensor<bfloat16>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(x{},y{})", "tensor<float>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(x{},y{})", "tensor<int8>(x{},y{})", "tensor(x{},y{})"); checkMerge("tensor(y{})", "tensor(y{})", "tensor(y{})"); checkMerge("tensor(x{})", "tensor(x[5])", "tensor(x{})"); checkMergeFails("tensor(a[10])", "tensor()"); checkMergeFails("tensor(a[10])", "tensor(x{},y{},z{})"); checkMergeFails("tensor<bfloat16>(x[5])", "tensor()"); checkMergeFails("tensor<bfloat16>(x{})", "tensor()"); checkMergeFails("tensor<float>(x[5])", "tensor()"); checkMergeFails("tensor<float>(x{})", "tensor()"); checkMergeFails("tensor<int8>(x{})", "tensor()"); checkMergeFails("tensor()", "tensor<int8>(x[5])"); checkMergeFails("tensor()", "tensor(x{})"); checkMergeFails("tensor(x[3])", "tensor(x[5])"); checkMergeFails("tensor(x[5])", "tensor(x[3])"); checkMergeFails("tensor(x{})", "tensor()"); checkMergeFails("tensor(x{},y{})", 
"tensor(x{},z{})"); checkMergeFails("tensor(y{})", "tensor()"); } @Test public void verifyRename() { checkRename("tensor(x[10],y[20],z[30])", mkl("y"), mkl("a"), "tensor(a[20],x[10],z[30])"); checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})"); checkRename("tensor(x{},y[5])", mkl("x","y"), mkl("y","x"), "tensor(x[5],y{})"); checkRename("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), mkl("c", "a", "b"), "tensor(a[20],b[30],c[10])"); checkRename("tensor(x{})", mkl("x"), mkl("x"), "tensor(x{})"); checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})"); checkRename("tensor<float>(x{})", mkl("x"), mkl("y"), "tensor<float>(y{})"); checkRename("tensor<bfloat16>(x{})", mkl("x"), mkl("y"), "tensor<bfloat16>(y{})"); checkRename("tensor<int8>(x{})", mkl("x"), mkl("y"), "tensor<int8>(y{})"); checkRenameFails("tensor(x{})", mkl(), mkl()); checkRenameFails("tensor()", mkl(), mkl()); checkRenameFails("tensor(x{},y{})", mkl("x"), mkl("y","z")); checkRenameFails("tensor(x{},y{})", mkl("x","y"), mkl("z")); checkRenameFails("tensor()", mkl("a"), mkl("b")); checkRenameFails("tensor(x[10],y[20],z[30])", mkl("y","z"), mkl("a", "x")); } @Test public void verifyConcat() { checkConcat("tensor(y[7])", "tensor(x{})", "z", "tensor(x{},y[7],z[2])"); checkConcat("tensor()", "tensor()", "x", "tensor(x[2])"); checkConcat("tensor(x[2])", "tensor()", "x", "tensor(x[3])"); checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[2])", "tensor()", "y", "tensor(x[2],y[2])"); checkConcat("tensor(x[2])", "tensor(x[2])", "y", "tensor(x[2],y[2])"); checkConcat("tensor(x[2],y[2])", "tensor(x[3])", "x", "tensor(x[5],y[2])"); checkConcat("tensor(x[2],y[2])", "tensor(y[7])", "y", "tensor(x[2],y[9])"); checkConcat("tensor(x[5])", "tensor(y[7])", "z", "tensor(x[5],y[7],z[2])"); checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", "tensor<float>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", 
"tensor<bfloat16>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor(x[3])", "tensor<int8>(x[2])", "x", "tensor(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<float>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<float>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<bfloat16>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<bfloat16>(x[5])"); checkConcat("tensor<bfloat16>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])"); checkConcat("tensor<int8>(x[3])", "tensor<int8>(x[2])", "x", "tensor<int8>(x[5])"); checkConcat("tensor(x[3])", "tensor()", "x", "tensor(x[4])"); checkConcat("tensor<float>(x[3])", "tensor()", "x", "tensor<float>(x[4])"); checkConcat("tensor<bfloat16>(x[3])", "tensor()", "x", "tensor<bfloat16>(x[4])"); checkConcat("tensor<int8>(x[3])", "tensor()", "x", "tensor<int8>(x[4])"); checkConcatFails("tensor(x{})", "tensor(x[2])", "x"); checkConcatFails("tensor(x{})", "tensor(x{})", "x"); checkConcatFails("tensor(x{})", "tensor()", "x"); checkConcatFails("tensor(x[3])", "tensor(x[2])", "y"); } @Test public void verifyPeek() { checkPeek("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<bfloat16>(x[10],y[20])"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<int8>(x[10],y[20])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<bfloat16>(y[20])"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<int8>(y[20])"); checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), 
"tensor()"); checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])"); checkPeek("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<bfloat16>(x[10],y{})"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<int8>(x[10],y{})"); checkPeek("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<bfloat16>(y{})"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<int8>(y{})"); checkPeek("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()"); checkFullPeek("tensor(x[10],y[20],z[30])"); checkFullPeek("tensor<float>(x[10],y[20],z[30])"); checkFullPeek("tensor<bfloat16>(x[10],y[20],z[30])"); checkFullPeek("tensor<int8>(x[10],y[20],z[30])"); checkFullPeek("tensor(x[10],y{},z[30])"); checkFullPeek("tensor<float>(x[10],y{},z[30])"); checkFullPeek("tensor<bfloat16>(x[10],y{},z[30])"); checkFullPeek("tensor<int8>(x[10],y{},z[30])"); checkPeekFails("tensor()", mkl()); checkPeekFails("tensor()", mkl("x")); checkPeekFails("tensor(y{})", mkl("x")); checkPeekFails("tensor(y{})", mkl("y", "y")); checkPeekFails("tensor<float>(y[10])", mkl("x")); } @Test public void verifyCellCast() { checkCast("tensor(x[10],y[20],z[30])", TensorType.Value.FLOAT, 
"tensor<float>(x[10],y[20],z[30])"); checkCasts("tensor<double>(x[10])"); checkCasts("tensor<float>(x[10])"); checkCasts("tensor<bfloat16>(x[10])"); checkCasts("tensor<int8>(x[10])"); checkCasts("tensor<double>(x{})"); checkCasts("tensor<float>(x{})"); checkCasts("tensor<bfloat16>(x{})"); checkCasts("tensor<int8>(x{})"); checkCasts("tensor<double>(x{},y[5])"); checkCasts("tensor<float>(x{},y[5])"); checkCasts("tensor<bfloat16>(x{},y[5])"); checkCasts("tensor<int8>(x{},y[5])"); checkCast("tensor()", TensorType.Value.DOUBLE, "tensor()"); checkCastFails("tensor()", TensorType.Value.FLOAT); checkCastFails("tensor()", TensorType.Value.BFLOAT16); checkCastFails("tensor()", TensorType.Value.INT8); } private static void checkMap(String specA, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.map(a); assertEquals(expected, result.toString()); } private static void checkJoin(String specA, String specB, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.join(a, b); assertEquals(expected, result.toString()); } private static void checkJoinFails(String specA, String specB) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.join(a, b); System.err.println("join of "+a+" and "+b+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkReduce(String specA, List<String> dims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.reduce(a, dims); assertEquals(expected, result.toString()); } private static void checkFullReduce(String specA) { String expected = "tensor()"; List<String> dims = new ArrayList<>(); checkReduce(specA, dims, expected); var a = TensorType.fromSpec(specA); for (var dim : a.dimensions()) { dims.add(dim.name()); } checkReduce(specA, dims, expected); } private static void checkReduceFails(String 
specA, String dim) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.reduce(a, mkl(dim)); System.err.println("Reduce "+specA+" with dim "+dim+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkMerge(String specA, String specB, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.merge(a, b); assertEquals(expected, result.toString()); } private static void checkMergeFails(String specA, String specB) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.merge(a, b); System.err.println("merge of "+a+" and "+b+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkRename(String specA, List<String> fromDims, List<String> toDims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.rename(a, fromDims, toDims); assertEquals(expected, result.toString()); } private static void checkRenameFails(String specA, List<String> fromDims, List<String> toDims) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.rename(a, fromDims, toDims); System.err.println("rename "+a+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkConcat(String specA, String specB, String dim, String expected) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); var result = TypeResolver.concat(a, b, dim); assertEquals(expected, result.toString()); } private static void checkConcatFails(String specA, String specB, String dim) { var a = TensorType.fromSpec(specA); var b = TensorType.fromSpec(specB); boolean caught = false; try { var result = TypeResolver.concat(a, b, dim); System.err.println("concat "+a+" and "+b+" along 
"+dim+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkPeek(String specA, List<String> dims, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.peek(a, dims); assertEquals(expected, result.toString()); } private static void checkFullPeek(String specA) { String expected = "tensor()"; List<String> dims = new ArrayList<>(); var a = TensorType.fromSpec(specA); for (var dim : a.dimensions()) { dims.add(dim.name()); } checkPeek(specA, dims, expected); } private static void checkPeekFails(String specA, List<String> dims) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.peek(a, dims); System.err.println("Peek "+specA+" with dims "+dims+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } private static void checkCast(String specA, TensorType.Value newValueType, String expected) { var a = TensorType.fromSpec(specA); var result = TypeResolver.cell_cast(a, newValueType); assertEquals(expected, result.toString()); } private static void checkCasts(String specA) { var a = TensorType.fromSpec(specA); for (var newValueType : TensorType.Value.values()) { var result = TypeResolver.cell_cast(a, newValueType); assertEquals(result.valueType(), newValueType); assertEquals(result.dimensions(), a.dimensions()); } } private static void checkCastFails(String specA, TensorType.Value newValueType) { var a = TensorType.fromSpec(specA); boolean caught = false; try { var result = TypeResolver.cell_cast(a, newValueType); System.err.println("cast of "+a+" to "+newValueType+" produces: "+result); } catch (IllegalArgumentException e) { caught = true; } assertTrue(caught); } }
Should we return `Optional<String>` from here and from `executeNodeCtlInContainer()`, so we don't need to check `isBlank()` everywhere?
/** Runs "vespa-nodectl resume" inside the container and returns the command's output. */
public String resumeNode(NodeAgentContext context) {
    var nodeCtlResult = executeNodeCtlInContainer(context, "resume");
    return nodeCtlResult.getOutput();
}
return executeNodeCtlInContainer(context, "resume").getOutput();
/** Invokes the node-ctl "resume" program in the container; returns its output. */
public String resumeNode(NodeAgentContext context) {
    return executeNodeCtlInContainer(context, "resume")
            .getOutput();
}
class ContainerOperationsImpl implements ContainerOperations { private static final Logger logger = Logger.getLogger(ContainerOperationsImpl.class.getName()); static final String MANAGER_NAME = "node-admin"; private static final InetAddress IPV6_NPT_PREFIX = InetAddresses.forString("fd00::"); private static final InetAddress IPV4_NPT_PREFIX = InetAddresses.forString("172.17.0.0"); private static final String ETC_MACHINE_ID = "/etc/machine-id"; private static final Random random = new Random(System.nanoTime()); private final ContainerEngine containerEngine; private final Terminal terminal; private final IPAddresses ipAddresses; private final FileSystem fileSystem; public ContainerOperationsImpl(ContainerEngine containerEngine, Terminal terminal, IPAddresses ipAddresses, FileSystem fileSystem) { this.containerEngine = containerEngine; this.terminal = terminal; this.ipAddresses = ipAddresses; this.fileSystem = fileSystem; } @Override public void createContainer(NodeAgentContext context, ContainerData containerData, ContainerResources containerResources) { context.log(logger, "Creating container"); ContainerEngine.CreateContainerCommand command = containerEngine.createContainerCommand( context.node().wantedDockerImage().get(), context.containerName()) .withHostName(context.node().hostname()) .withResources(containerResources) .withManagedBy(MANAGER_NAME) .withDnsOption("inet6") .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 409_600, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN") .withAddCapability("SYS_NICE"); if (context.nodeType() != NodeType.proxy && context.nodeType() != NodeType.controller) command.withSecurityOpt("no-new-privileges"); if (context.node().membership().map(m -> m.type().hasContent()).orElse(false)) command.withSecurityOpt("seccomp=unconfined"); ContainerNetworkMode networkMode = context.networkMode(); command.withNetworkMode(networkMode.networkName()); if (networkMode == 
ContainerNetworkMode.NPT) { Optional<? extends InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname()); Optional<? extends InetAddress> ipV6Local = ipAddresses.getIPv6Address(context.node().hostname()); assertEqualIpAddresses(context.hostname(), ipV4Local, context.node().ipAddresses(), IPVersion.IPv4); assertEqualIpAddresses(context.hostname(), ipV6Local, context.node().ipAddresses(), IPVersion.IPv6); if (ipV4Local.isEmpty() && ipV6Local.isEmpty()) { throw new ConvergenceException("Container " + context.node().hostname() + " with " + networkMode + " networking must have at least 1 IP address, but found none"); } ipV6Local = ipV6Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV6_NPT_PREFIX, 8)); ipV6Local.ifPresent(command::withIpAddress); ipV4Local = ipV4Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV4_NPT_PREFIX, 2)); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local); } else if (networkMode == ContainerNetworkMode.LOCAL) { var ipv4Address = ipAddresses.getIPv4Address(context.node().hostname()) .orElseThrow(() -> new IllegalArgumentException("No IPv4 address could be resolved from '" + context.hostname()+ "'")); command.withIpAddress(ipv4Address); } UnixPath machineIdPath = new UnixPath(context.pathOnHostFromPathInNode(ETC_MACHINE_ID)); if (!machineIdPath.exists()) { String machineId = String.format("%16x%16x\n", random.nextLong(), random.nextLong()); machineIdPath.createParents().writeUtf8File(machineId); context.log(logger, "Wrote " + machineId + " to " + machineIdPath); } addMounts(context, command); logger.info("Creating new container with args: " + command); command.create(); } private static void assertEqualIpAddresses(HostName hostName, Optional<? 
extends InetAddress> resolvedAddress, Set<String> nrAddresses, IPVersion ipVersion) { Optional<InetAddress> nrAddress = nrAddresses.stream() .map(InetAddresses::forString) .filter(ipVersion::match) .findFirst(); if (resolvedAddress.equals(nrAddress)) return; throw new ConvergenceException(String.format( "IP address (%s) resolved from %s does not match IP address (%s) in node-repo", resolvedAddress.map(InetAddresses::toAddrString).orElse("[none]"), hostName, nrAddress.map(InetAddresses::toAddrString).orElse("[none]"))); } void addEtcHosts(ContainerData containerData, String hostname, Optional<? extends InetAddress> ipV4Local, Optional<? extends InetAddress> ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n"); ipV6Local.ifPresent(ipv6 -> etcHosts.append(ipv6.getHostAddress()).append('\t').append(hostname).append('\n')); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress()).append('\t').append(hostname).append('\n')); containerData.addFile(fileSystem.getPath("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); containerEngine.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); containerEngine.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); containerEngine.deleteContainer(context.containerName()); } @Override public void updateContainer(NodeAgentContext context, ContainerId containerId, ContainerResources containerResources) { containerEngine.updateContainer(context.containerName(), containerResources); } @Override public Optional<Container> getContainer(NodeAgentContext context) 
{ return containerEngine.getContainer(context.containerName()); } @Override public boolean pullImageAsyncIfNeeded(TaskContext context, DockerImage dockerImage, RegistryCredentials registryCredentials) { return containerEngine.pullImageAsyncIfNeeded(dockerImage, registryCredentials); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command) { int containerPid = containerEngine.getContainer(context.containerName()) .filter(container -> container.state.isRunning()) .orElseThrow(() -> new RuntimeException( "Found no running container named " + context.containerName().asString())) .pid; return terminal.newCommandLine(context) .add("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--") .add(command) .executeSilently(); } @Override @Override public String suspendNode(NodeAgentContext context) { return executeNodeCtlInContainer(context, "suspend").getOutput(); } @Override public String restartVespa(NodeAgentContext context) { return executeNodeCtlInContainer(context, "restart-vespa").getOutput(); } @Override public String startServices(NodeAgentContext context) { return executeNodeCtlInContainer(context, "start").getOutput(); } @Override public String stopServices(NodeAgentContext context) { return executeNodeCtlInContainer(context, "stop").getOutput(); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] 
{context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return containerEngine.getContainerStats(context.containerName()); } private void addMounts(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { var volumes = new VolumeHelper(context, command); volumes.addPrivateVolumes( ETC_MACHINE_ID, "/etc/vespa/flags", "/etc/yamas-agent", "/opt/splunkforwarder/var/log", "/var/log", "/var/log/journal", "/var/spool/postfix/maildrop", "logs/vespa", "logs/ysar", "tmp", "var/crash", "var/container-data", "var/db/vespa", "var/jdisc_container", "var/vespa", "var/zookeeper"); if (context.nodeType() == NodeType.proxy) { volumes.addPrivateVolumes("logs/nginx", "var/vespa-hosted/routing"); } else if (context.nodeType() == NodeType.tenant) volumes.addPrivateVolumes("/var/lib/sia"); if (isInfrastructureHost(context.nodeType())) volumes.addSharedVolumeMap("/var/lib/sia", "/var/lib/sia"); boolean isMain = context.zone().getSystemName() == SystemName.cd || context.zone().getSystemName() == SystemName.main; if (isMain && context.nodeType() == NodeType.tenant) volumes.addSharedVolumeMap("/var/zpe", "var/zpe"); } @Override public boolean noManagedContainersRunning(TaskContext context) { return containerEngine.noManagedContainersRunning(MANAGER_NAME); } @Override public boolean retainManagedContainers(TaskContext context, Set<ContainerName> containerNames) { return containerEngine.listManagedContainers(MANAGER_NAME).stream() .filter(containerName -> ! 
containerNames.contains(containerName)) .peek(containerName -> { containerEngine.stopContainer(containerName); containerEngine.deleteContainer(containerName); }).count() > 0; } @Override public boolean deleteUnusedContainerImages(TaskContext context, List<DockerImage> excludes, Duration minImageAgeToDelete) { return containerEngine.deleteUnusedDockerImages(excludes, minImageAgeToDelete); } /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.config || nodeType == NodeType.proxy || nodeType == NodeType.controller; } private static class VolumeHelper { private final NodeAgentContext context; private final ContainerEngine.CreateContainerCommand command; public VolumeHelper(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { this.context = context; this.command = command; } /** * Resolve each path to an absolute relative the container's vespa home directory. * Mounts the resulting path, under the container's storage directory as path in the container. */ public void addPrivateVolumes(String... pathsInNode) { Stream.of(pathsInNode).forEach(pathString -> { Path absolutePathInNode = resolveNodePath(pathString); Path pathOnHost = context.pathOnHostFromPathInNode(absolutePathInNode); command.withVolume(pathOnHost, absolutePathInNode); }); } /** * Mounts pathOnHost on the host as pathInNode in the container. Use for paths that * might be shared with other containers. */ public void addSharedVolumeMap(String pathOnHost, String pathInNode) { command.withSharedVolume(resolveNodePath(pathOnHost), resolveNodePath(pathInNode)); } private Path resolveNodePath(String pathString) { Path path = context.fileSystem().getPath(pathString); return path.isAbsolute() ? path : context.pathInNodeUnderVespaHome(path); } } }
class ContainerOperationsImpl implements ContainerOperations { private static final Logger logger = Logger.getLogger(ContainerOperationsImpl.class.getName()); static final String MANAGER_NAME = "node-admin"; private static final InetAddress IPV6_NPT_PREFIX = InetAddresses.forString("fd00::"); private static final InetAddress IPV4_NPT_PREFIX = InetAddresses.forString("172.17.0.0"); private static final String ETC_MACHINE_ID = "/etc/machine-id"; private static final Random random = new Random(System.nanoTime()); private final ContainerEngine containerEngine; private final Terminal terminal; private final IPAddresses ipAddresses; private final FileSystem fileSystem; public ContainerOperationsImpl(ContainerEngine containerEngine, Terminal terminal, IPAddresses ipAddresses, FileSystem fileSystem) { this.containerEngine = containerEngine; this.terminal = terminal; this.ipAddresses = ipAddresses; this.fileSystem = fileSystem; } @Override public void createContainer(NodeAgentContext context, ContainerData containerData, ContainerResources containerResources) { context.log(logger, "Creating container"); ContainerEngine.CreateContainerCommand command = containerEngine.createContainerCommand( context.node().wantedDockerImage().get(), context.containerName()) .withHostName(context.node().hostname()) .withResources(containerResources) .withManagedBy(MANAGER_NAME) .withDnsOption("inet6") .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 409_600, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN") .withAddCapability("SYS_NICE"); if (context.nodeType() != NodeType.proxy && context.nodeType() != NodeType.controller) command.withSecurityOpt("no-new-privileges"); if (context.node().membership().map(m -> m.type().hasContent()).orElse(false)) command.withSecurityOpt("seccomp=unconfined"); ContainerNetworkMode networkMode = context.networkMode(); command.withNetworkMode(networkMode.networkName()); if (networkMode == 
ContainerNetworkMode.NPT) { Optional<? extends InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname()); Optional<? extends InetAddress> ipV6Local = ipAddresses.getIPv6Address(context.node().hostname()); assertEqualIpAddresses(context.hostname(), ipV4Local, context.node().ipAddresses(), IPVersion.IPv4); assertEqualIpAddresses(context.hostname(), ipV6Local, context.node().ipAddresses(), IPVersion.IPv6); if (ipV4Local.isEmpty() && ipV6Local.isEmpty()) { throw new ConvergenceException("Container " + context.node().hostname() + " with " + networkMode + " networking must have at least 1 IP address, but found none"); } ipV6Local = ipV6Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV6_NPT_PREFIX, 8)); ipV6Local.ifPresent(command::withIpAddress); ipV4Local = ipV4Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV4_NPT_PREFIX, 2)); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local); } else if (networkMode == ContainerNetworkMode.LOCAL) { var ipv4Address = ipAddresses.getIPv4Address(context.node().hostname()) .orElseThrow(() -> new IllegalArgumentException("No IPv4 address could be resolved from '" + context.hostname()+ "'")); command.withIpAddress(ipv4Address); } UnixPath machineIdPath = new UnixPath(context.pathOnHostFromPathInNode(ETC_MACHINE_ID)); if (!machineIdPath.exists()) { String machineId = String.format("%16x%16x\n", random.nextLong(), random.nextLong()); machineIdPath.createParents().writeUtf8File(machineId); context.log(logger, "Wrote " + machineId + " to " + machineIdPath); } addMounts(context, command); logger.info("Creating new container with args: " + command); command.create(); } private static void assertEqualIpAddresses(HostName hostName, Optional<? 
extends InetAddress> resolvedAddress, Set<String> nrAddresses, IPVersion ipVersion) { Optional<InetAddress> nrAddress = nrAddresses.stream() .map(InetAddresses::forString) .filter(ipVersion::match) .findFirst(); if (resolvedAddress.equals(nrAddress)) return; throw new ConvergenceException(String.format( "IP address (%s) resolved from %s does not match IP address (%s) in node-repo", resolvedAddress.map(InetAddresses::toAddrString).orElse("[none]"), hostName, nrAddress.map(InetAddresses::toAddrString).orElse("[none]"))); } void addEtcHosts(ContainerData containerData, String hostname, Optional<? extends InetAddress> ipV4Local, Optional<? extends InetAddress> ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n"); ipV6Local.ifPresent(ipv6 -> etcHosts.append(ipv6.getHostAddress()).append('\t').append(hostname).append('\n')); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress()).append('\t').append(hostname).append('\n')); containerData.addFile(fileSystem.getPath("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); containerEngine.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); containerEngine.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); containerEngine.deleteContainer(context.containerName()); } @Override public void updateContainer(NodeAgentContext context, ContainerId containerId, ContainerResources containerResources) { containerEngine.updateContainer(context.containerName(), containerResources); } @Override public Optional<Container> getContainer(NodeAgentContext context) 
{ return containerEngine.getContainer(context.containerName()); } @Override public boolean pullImageAsyncIfNeeded(TaskContext context, DockerImage dockerImage, RegistryCredentials registryCredentials) { return containerEngine.pullImageAsyncIfNeeded(dockerImage, registryCredentials); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command) { int containerPid = containerEngine.getContainer(context.containerName()) .filter(container -> container.state.isRunning()) .orElseThrow(() -> new RuntimeException( "Found no running container named " + context.containerName().asString())) .pid; return terminal.newCommandLine(context) .add("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--") .add(command) .executeSilently(); } @Override @Override public String suspendNode(NodeAgentContext context) { return executeNodeCtlInContainer(context, "suspend").getOutput(); } @Override public String restartVespa(NodeAgentContext context) { return executeNodeCtlInContainer(context, "restart-vespa").getOutput(); } @Override public String startServices(NodeAgentContext context) { return executeNodeCtlInContainer(context, "start").getOutput(); } @Override public String stopServices(NodeAgentContext context) { return executeNodeCtlInContainer(context, "stop").getOutput(); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] 
{context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return containerEngine.getContainerStats(context.containerName()); } private void addMounts(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { var volumes = new VolumeHelper(context, command); volumes.addPrivateVolumes( ETC_MACHINE_ID, "/etc/vespa/flags", "/etc/yamas-agent", "/opt/splunkforwarder/var/log", "/var/log", "/var/log/journal", "/var/spool/postfix/maildrop", "logs/vespa", "logs/ysar", "tmp", "var/crash", "var/container-data", "var/db/vespa", "var/jdisc_container", "var/vespa", "var/zookeeper"); if (context.nodeType() == NodeType.proxy) { volumes.addPrivateVolumes("logs/nginx", "var/vespa-hosted/routing"); } else if (context.nodeType() == NodeType.tenant) volumes.addPrivateVolumes("/var/lib/sia"); if (isInfrastructureHost(context.nodeType())) volumes.addSharedVolumeMap("/var/lib/sia", "/var/lib/sia"); boolean isMain = context.zone().getSystemName() == SystemName.cd || context.zone().getSystemName() == SystemName.main; if (isMain && context.nodeType() == NodeType.tenant) volumes.addSharedVolumeMap("/var/zpe", "var/zpe"); } @Override public boolean noManagedContainersRunning(TaskContext context) { return containerEngine.noManagedContainersRunning(MANAGER_NAME); } @Override public boolean retainManagedContainers(TaskContext context, Set<ContainerName> containerNames) { return containerEngine.listManagedContainers(MANAGER_NAME).stream() .filter(containerName -> ! 
containerNames.contains(containerName)) .peek(containerName -> { containerEngine.stopContainer(containerName); containerEngine.deleteContainer(containerName); }).count() > 0; } @Override public boolean deleteUnusedContainerImages(TaskContext context, List<DockerImage> excludes, Duration minImageAgeToDelete) { return containerEngine.deleteUnusedDockerImages(excludes, minImageAgeToDelete); } /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.config || nodeType == NodeType.proxy || nodeType == NodeType.controller; } private static class VolumeHelper { private final NodeAgentContext context; private final ContainerEngine.CreateContainerCommand command; public VolumeHelper(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { this.context = context; this.command = command; } /** * Resolve each path to an absolute relative the container's vespa home directory. * Mounts the resulting path, under the container's storage directory as path in the container. */ public void addPrivateVolumes(String... pathsInNode) { Stream.of(pathsInNode).forEach(pathString -> { Path absolutePathInNode = resolveNodePath(pathString); Path pathOnHost = context.pathOnHostFromPathInNode(absolutePathInNode); command.withVolume(pathOnHost, absolutePathInNode); }); } /** * Mounts pathOnHost on the host as pathInNode in the container. Use for paths that * might be shared with other containers. */ public void addSharedVolumeMap(String pathOnHost, String pathInNode) { command.withSharedVolume(resolveNodePath(pathOnHost), resolveNodePath(pathInNode)); } private Path resolveNodePath(String pathString) { Path path = context.fileSystem().getPath(pathString); return path.isAbsolute() ? path : context.pathInNodeUnderVespaHome(path); } } }
The only way to avoid testing for isBlank() everywhere would be to add a method that returns `Optional<String>` from e.g. ProcessResult. I don't know -- the output of a program is best modelled as String, not `Optional<String>`, IMO. It's the invoker of the particular program that knows how to interpret the output; in these cases, and for now, I'm thinking isBlank `=>` ignore, but that may change once this rolls out and we see what's printed.
/** Resumes the node by running the vespa-nodectl "resume" command in the container, returning its output. */
public String resumeNode(NodeAgentContext context) {
    ProcessResult result = executeNodeCtlInContainer(context, "resume");
    return result.getOutput();
}
return executeNodeCtlInContainer(context, "resume").getOutput();
/** Resumes the node: runs "vespa-nodectl resume" inside the container and returns the command's output. */
public String resumeNode(NodeAgentContext context) { return executeNodeCtlInContainer(context, "resume").getOutput(); }
class ContainerOperationsImpl implements ContainerOperations { private static final Logger logger = Logger.getLogger(ContainerOperationsImpl.class.getName()); static final String MANAGER_NAME = "node-admin"; private static final InetAddress IPV6_NPT_PREFIX = InetAddresses.forString("fd00::"); private static final InetAddress IPV4_NPT_PREFIX = InetAddresses.forString("172.17.0.0"); private static final String ETC_MACHINE_ID = "/etc/machine-id"; private static final Random random = new Random(System.nanoTime()); private final ContainerEngine containerEngine; private final Terminal terminal; private final IPAddresses ipAddresses; private final FileSystem fileSystem; public ContainerOperationsImpl(ContainerEngine containerEngine, Terminal terminal, IPAddresses ipAddresses, FileSystem fileSystem) { this.containerEngine = containerEngine; this.terminal = terminal; this.ipAddresses = ipAddresses; this.fileSystem = fileSystem; } @Override public void createContainer(NodeAgentContext context, ContainerData containerData, ContainerResources containerResources) { context.log(logger, "Creating container"); ContainerEngine.CreateContainerCommand command = containerEngine.createContainerCommand( context.node().wantedDockerImage().get(), context.containerName()) .withHostName(context.node().hostname()) .withResources(containerResources) .withManagedBy(MANAGER_NAME) .withDnsOption("inet6") .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 409_600, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN") .withAddCapability("SYS_NICE"); if (context.nodeType() != NodeType.proxy && context.nodeType() != NodeType.controller) command.withSecurityOpt("no-new-privileges"); if (context.node().membership().map(m -> m.type().hasContent()).orElse(false)) command.withSecurityOpt("seccomp=unconfined"); ContainerNetworkMode networkMode = context.networkMode(); command.withNetworkMode(networkMode.networkName()); if (networkMode == 
ContainerNetworkMode.NPT) { Optional<? extends InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname()); Optional<? extends InetAddress> ipV6Local = ipAddresses.getIPv6Address(context.node().hostname()); assertEqualIpAddresses(context.hostname(), ipV4Local, context.node().ipAddresses(), IPVersion.IPv4); assertEqualIpAddresses(context.hostname(), ipV6Local, context.node().ipAddresses(), IPVersion.IPv6); if (ipV4Local.isEmpty() && ipV6Local.isEmpty()) { throw new ConvergenceException("Container " + context.node().hostname() + " with " + networkMode + " networking must have at least 1 IP address, but found none"); } ipV6Local = ipV6Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV6_NPT_PREFIX, 8)); ipV6Local.ifPresent(command::withIpAddress); ipV4Local = ipV4Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV4_NPT_PREFIX, 2)); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local); } else if (networkMode == ContainerNetworkMode.LOCAL) { var ipv4Address = ipAddresses.getIPv4Address(context.node().hostname()) .orElseThrow(() -> new IllegalArgumentException("No IPv4 address could be resolved from '" + context.hostname()+ "'")); command.withIpAddress(ipv4Address); } UnixPath machineIdPath = new UnixPath(context.pathOnHostFromPathInNode(ETC_MACHINE_ID)); if (!machineIdPath.exists()) { String machineId = String.format("%16x%16x\n", random.nextLong(), random.nextLong()); machineIdPath.createParents().writeUtf8File(machineId); context.log(logger, "Wrote " + machineId + " to " + machineIdPath); } addMounts(context, command); logger.info("Creating new container with args: " + command); command.create(); } private static void assertEqualIpAddresses(HostName hostName, Optional<? 
extends InetAddress> resolvedAddress, Set<String> nrAddresses, IPVersion ipVersion) { Optional<InetAddress> nrAddress = nrAddresses.stream() .map(InetAddresses::forString) .filter(ipVersion::match) .findFirst(); if (resolvedAddress.equals(nrAddress)) return; throw new ConvergenceException(String.format( "IP address (%s) resolved from %s does not match IP address (%s) in node-repo", resolvedAddress.map(InetAddresses::toAddrString).orElse("[none]"), hostName, nrAddress.map(InetAddresses::toAddrString).orElse("[none]"))); } void addEtcHosts(ContainerData containerData, String hostname, Optional<? extends InetAddress> ipV4Local, Optional<? extends InetAddress> ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n"); ipV6Local.ifPresent(ipv6 -> etcHosts.append(ipv6.getHostAddress()).append('\t').append(hostname).append('\n')); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress()).append('\t').append(hostname).append('\n')); containerData.addFile(fileSystem.getPath("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); containerEngine.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); containerEngine.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); containerEngine.deleteContainer(context.containerName()); } @Override public void updateContainer(NodeAgentContext context, ContainerId containerId, ContainerResources containerResources) { containerEngine.updateContainer(context.containerName(), containerResources); } @Override public Optional<Container> getContainer(NodeAgentContext context) 
{ return containerEngine.getContainer(context.containerName()); } @Override public boolean pullImageAsyncIfNeeded(TaskContext context, DockerImage dockerImage, RegistryCredentials registryCredentials) { return containerEngine.pullImageAsyncIfNeeded(dockerImage, registryCredentials); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command) { int containerPid = containerEngine.getContainer(context.containerName()) .filter(container -> container.state.isRunning()) .orElseThrow(() -> new RuntimeException( "Found no running container named " + context.containerName().asString())) .pid; return terminal.newCommandLine(context) .add("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--") .add(command) .executeSilently(); } @Override @Override public String suspendNode(NodeAgentContext context) { return executeNodeCtlInContainer(context, "suspend").getOutput(); } @Override public String restartVespa(NodeAgentContext context) { return executeNodeCtlInContainer(context, "restart-vespa").getOutput(); } @Override public String startServices(NodeAgentContext context) { return executeNodeCtlInContainer(context, "start").getOutput(); } @Override public String stopServices(NodeAgentContext context) { return executeNodeCtlInContainer(context, "stop").getOutput(); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] 
{context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return containerEngine.getContainerStats(context.containerName()); } private void addMounts(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { var volumes = new VolumeHelper(context, command); volumes.addPrivateVolumes( ETC_MACHINE_ID, "/etc/vespa/flags", "/etc/yamas-agent", "/opt/splunkforwarder/var/log", "/var/log", "/var/log/journal", "/var/spool/postfix/maildrop", "logs/vespa", "logs/ysar", "tmp", "var/crash", "var/container-data", "var/db/vespa", "var/jdisc_container", "var/vespa", "var/zookeeper"); if (context.nodeType() == NodeType.proxy) { volumes.addPrivateVolumes("logs/nginx", "var/vespa-hosted/routing"); } else if (context.nodeType() == NodeType.tenant) volumes.addPrivateVolumes("/var/lib/sia"); if (isInfrastructureHost(context.nodeType())) volumes.addSharedVolumeMap("/var/lib/sia", "/var/lib/sia"); boolean isMain = context.zone().getSystemName() == SystemName.cd || context.zone().getSystemName() == SystemName.main; if (isMain && context.nodeType() == NodeType.tenant) volumes.addSharedVolumeMap("/var/zpe", "var/zpe"); } @Override public boolean noManagedContainersRunning(TaskContext context) { return containerEngine.noManagedContainersRunning(MANAGER_NAME); } @Override public boolean retainManagedContainers(TaskContext context, Set<ContainerName> containerNames) { return containerEngine.listManagedContainers(MANAGER_NAME).stream() .filter(containerName -> ! 
containerNames.contains(containerName)) .peek(containerName -> { containerEngine.stopContainer(containerName); containerEngine.deleteContainer(containerName); }).count() > 0; } @Override public boolean deleteUnusedContainerImages(TaskContext context, List<DockerImage> excludes, Duration minImageAgeToDelete) { return containerEngine.deleteUnusedDockerImages(excludes, minImageAgeToDelete); } /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.config || nodeType == NodeType.proxy || nodeType == NodeType.controller; } private static class VolumeHelper { private final NodeAgentContext context; private final ContainerEngine.CreateContainerCommand command; public VolumeHelper(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { this.context = context; this.command = command; } /** * Resolve each path to an absolute relative the container's vespa home directory. * Mounts the resulting path, under the container's storage directory as path in the container. */ public void addPrivateVolumes(String... pathsInNode) { Stream.of(pathsInNode).forEach(pathString -> { Path absolutePathInNode = resolveNodePath(pathString); Path pathOnHost = context.pathOnHostFromPathInNode(absolutePathInNode); command.withVolume(pathOnHost, absolutePathInNode); }); } /** * Mounts pathOnHost on the host as pathInNode in the container. Use for paths that * might be shared with other containers. */ public void addSharedVolumeMap(String pathOnHost, String pathInNode) { command.withSharedVolume(resolveNodePath(pathOnHost), resolveNodePath(pathInNode)); } private Path resolveNodePath(String pathString) { Path path = context.fileSystem().getPath(pathString); return path.isAbsolute() ? path : context.pathInNodeUnderVespaHome(path); } } }
class ContainerOperationsImpl implements ContainerOperations { private static final Logger logger = Logger.getLogger(ContainerOperationsImpl.class.getName()); static final String MANAGER_NAME = "node-admin"; private static final InetAddress IPV6_NPT_PREFIX = InetAddresses.forString("fd00::"); private static final InetAddress IPV4_NPT_PREFIX = InetAddresses.forString("172.17.0.0"); private static final String ETC_MACHINE_ID = "/etc/machine-id"; private static final Random random = new Random(System.nanoTime()); private final ContainerEngine containerEngine; private final Terminal terminal; private final IPAddresses ipAddresses; private final FileSystem fileSystem; public ContainerOperationsImpl(ContainerEngine containerEngine, Terminal terminal, IPAddresses ipAddresses, FileSystem fileSystem) { this.containerEngine = containerEngine; this.terminal = terminal; this.ipAddresses = ipAddresses; this.fileSystem = fileSystem; } @Override public void createContainer(NodeAgentContext context, ContainerData containerData, ContainerResources containerResources) { context.log(logger, "Creating container"); ContainerEngine.CreateContainerCommand command = containerEngine.createContainerCommand( context.node().wantedDockerImage().get(), context.containerName()) .withHostName(context.node().hostname()) .withResources(containerResources) .withManagedBy(MANAGER_NAME) .withDnsOption("inet6") .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 409_600, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN") .withAddCapability("SYS_NICE"); if (context.nodeType() != NodeType.proxy && context.nodeType() != NodeType.controller) command.withSecurityOpt("no-new-privileges"); if (context.node().membership().map(m -> m.type().hasContent()).orElse(false)) command.withSecurityOpt("seccomp=unconfined"); ContainerNetworkMode networkMode = context.networkMode(); command.withNetworkMode(networkMode.networkName()); if (networkMode == 
ContainerNetworkMode.NPT) { Optional<? extends InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname()); Optional<? extends InetAddress> ipV6Local = ipAddresses.getIPv6Address(context.node().hostname()); assertEqualIpAddresses(context.hostname(), ipV4Local, context.node().ipAddresses(), IPVersion.IPv4); assertEqualIpAddresses(context.hostname(), ipV6Local, context.node().ipAddresses(), IPVersion.IPv6); if (ipV4Local.isEmpty() && ipV6Local.isEmpty()) { throw new ConvergenceException("Container " + context.node().hostname() + " with " + networkMode + " networking must have at least 1 IP address, but found none"); } ipV6Local = ipV6Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV6_NPT_PREFIX, 8)); ipV6Local.ifPresent(command::withIpAddress); ipV4Local = ipV4Local.map(ip -> IPAddresses.prefixTranslate(ip, IPV4_NPT_PREFIX, 2)); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local); } else if (networkMode == ContainerNetworkMode.LOCAL) { var ipv4Address = ipAddresses.getIPv4Address(context.node().hostname()) .orElseThrow(() -> new IllegalArgumentException("No IPv4 address could be resolved from '" + context.hostname()+ "'")); command.withIpAddress(ipv4Address); } UnixPath machineIdPath = new UnixPath(context.pathOnHostFromPathInNode(ETC_MACHINE_ID)); if (!machineIdPath.exists()) { String machineId = String.format("%16x%16x\n", random.nextLong(), random.nextLong()); machineIdPath.createParents().writeUtf8File(machineId); context.log(logger, "Wrote " + machineId + " to " + machineIdPath); } addMounts(context, command); logger.info("Creating new container with args: " + command); command.create(); } private static void assertEqualIpAddresses(HostName hostName, Optional<? 
extends InetAddress> resolvedAddress, Set<String> nrAddresses, IPVersion ipVersion) { Optional<InetAddress> nrAddress = nrAddresses.stream() .map(InetAddresses::forString) .filter(ipVersion::match) .findFirst(); if (resolvedAddress.equals(nrAddress)) return; throw new ConvergenceException(String.format( "IP address (%s) resolved from %s does not match IP address (%s) in node-repo", resolvedAddress.map(InetAddresses::toAddrString).orElse("[none]"), hostName, nrAddress.map(InetAddresses::toAddrString).orElse("[none]"))); } void addEtcHosts(ContainerData containerData, String hostname, Optional<? extends InetAddress> ipV4Local, Optional<? extends InetAddress> ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n"); ipV6Local.ifPresent(ipv6 -> etcHosts.append(ipv6.getHostAddress()).append('\t').append(hostname).append('\n')); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress()).append('\t').append(hostname).append('\n')); containerData.addFile(fileSystem.getPath("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); containerEngine.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); containerEngine.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); containerEngine.deleteContainer(context.containerName()); } @Override public void updateContainer(NodeAgentContext context, ContainerId containerId, ContainerResources containerResources) { containerEngine.updateContainer(context.containerName(), containerResources); } @Override public Optional<Container> getContainer(NodeAgentContext context) 
{ return containerEngine.getContainer(context.containerName()); } @Override public boolean pullImageAsyncIfNeeded(TaskContext context, DockerImage dockerImage, RegistryCredentials registryCredentials) { return containerEngine.pullImageAsyncIfNeeded(dockerImage, registryCredentials); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return containerEngine.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command) { int containerPid = containerEngine.getContainer(context.containerName()) .filter(container -> container.state.isRunning()) .orElseThrow(() -> new RuntimeException( "Found no running container named " + context.containerName().asString())) .pid; return terminal.newCommandLine(context) .add("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--") .add(command) .executeSilently(); } @Override @Override public String suspendNode(NodeAgentContext context) { return executeNodeCtlInContainer(context, "suspend").getOutput(); } @Override public String restartVespa(NodeAgentContext context) { return executeNodeCtlInContainer(context, "restart-vespa").getOutput(); } @Override public String startServices(NodeAgentContext context) { return executeNodeCtlInContainer(context, "start").getOutput(); } @Override public String stopServices(NodeAgentContext context) { return executeNodeCtlInContainer(context, "stop").getOutput(); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] 
{context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return containerEngine.getContainerStats(context.containerName()); } private void addMounts(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { var volumes = new VolumeHelper(context, command); volumes.addPrivateVolumes( ETC_MACHINE_ID, "/etc/vespa/flags", "/etc/yamas-agent", "/opt/splunkforwarder/var/log", "/var/log", "/var/log/journal", "/var/spool/postfix/maildrop", "logs/vespa", "logs/ysar", "tmp", "var/crash", "var/container-data", "var/db/vespa", "var/jdisc_container", "var/vespa", "var/zookeeper"); if (context.nodeType() == NodeType.proxy) { volumes.addPrivateVolumes("logs/nginx", "var/vespa-hosted/routing"); } else if (context.nodeType() == NodeType.tenant) volumes.addPrivateVolumes("/var/lib/sia"); if (isInfrastructureHost(context.nodeType())) volumes.addSharedVolumeMap("/var/lib/sia", "/var/lib/sia"); boolean isMain = context.zone().getSystemName() == SystemName.cd || context.zone().getSystemName() == SystemName.main; if (isMain && context.nodeType() == NodeType.tenant) volumes.addSharedVolumeMap("/var/zpe", "var/zpe"); } @Override public boolean noManagedContainersRunning(TaskContext context) { return containerEngine.noManagedContainersRunning(MANAGER_NAME); } @Override public boolean retainManagedContainers(TaskContext context, Set<ContainerName> containerNames) { return containerEngine.listManagedContainers(MANAGER_NAME).stream() .filter(containerName -> ! 
containerNames.contains(containerName)) .peek(containerName -> { containerEngine.stopContainer(containerName); containerEngine.deleteContainer(containerName); }).count() > 0; } @Override public boolean deleteUnusedContainerImages(TaskContext context, List<DockerImage> excludes, Duration minImageAgeToDelete) { return containerEngine.deleteUnusedDockerImages(excludes, minImageAgeToDelete); } /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.config || nodeType == NodeType.proxy || nodeType == NodeType.controller; } private static class VolumeHelper { private final NodeAgentContext context; private final ContainerEngine.CreateContainerCommand command; public VolumeHelper(NodeAgentContext context, ContainerEngine.CreateContainerCommand command) { this.context = context; this.command = command; } /** * Resolve each path to an absolute relative the container's vespa home directory. * Mounts the resulting path, under the container's storage directory as path in the container. */ public void addPrivateVolumes(String... pathsInNode) { Stream.of(pathsInNode).forEach(pathString -> { Path absolutePathInNode = resolveNodePath(pathString); Path pathOnHost = context.pathOnHostFromPathInNode(absolutePathInNode); command.withVolume(pathOnHost, absolutePathInNode); }); } /** * Mounts pathOnHost on the host as pathInNode in the container. Use for paths that * might be shared with other containers. */ public void addSharedVolumeMap(String pathOnHost, String pathInNode) { command.withSharedVolume(resolveNodePath(pathOnHost), resolveNodePath(pathInNode)); } private Path resolveNodePath(String pathString) { Path path = context.fileSystem().getPath(pathString); return path.isAbsolute() ? path : context.pathInNodeUnderVespaHome(path); } } }
Please remove comment on line 103.
/**
 * Compares the settings of each attribute present in both the current and the next
 * model, and returns the restart actions required by any differences.
 */
private List<VespaConfigChangeAction> validateAttributeSettings() {
    List<VespaConfigChangeAction> actions = new ArrayList<>();
    for (Attribute next : nextFields.attributes()) {
        Attribute current = currentFields.getAttribute(next.getName());
        if (current == null) continue; // attribute is new; nothing to compare against
        validateAttributeSetting(id, current, next, Attribute::isFastSearch, "fast-search", actions);
        validateAttributeSetting(id, current, next, Attribute::isFastAccess, "fast-access", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryCase, "dictionary: cased/uncased", actions);
        validateAttributeSetting(id, current, next, Attribute::isHuge, "huge", actions);
        validateAttributeSetting(id, current, next, Attribute::densePostingListThreshold, "dense-posting-list-threshold", actions);
        validateAttributeSetting(id, current, next, Attribute::isEnabledOnlyBitVector, "rank: filter", actions);
        validateAttributeSetting(id, current, next, Attribute::distanceMetric, "distance-metric", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::hasHnswIndex, "indexing: index", actions);
        // HNSW parameters can only be compared when both sides actually have an HNSW index.
        if (hasHnswIndex(current) && hasHnswIndex(next)) {
            validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::maxLinksPerNode, "max-links-per-node", actions);
            validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::neighborsToExploreAtInsert, "neighbors-to-explore-at-insert", actions);
        }
    }
    return actions;
}
validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", result);
/**
 * Compares the settings of each attribute present in both the current and the next
 * model, and returns the restart actions required by any differences.
 */
private List<VespaConfigChangeAction> validateAttributeSettings() {
    List<VespaConfigChangeAction> actions = new ArrayList<>();
    for (Attribute next : nextFields.attributes()) {
        Attribute current = currentFields.getAttribute(next.getName());
        if (current == null) continue; // attribute is new; nothing to compare against
        validateAttributeSetting(id, current, next, Attribute::isFastSearch, "fast-search", actions);
        validateAttributeSetting(id, current, next, Attribute::isFastAccess, "fast-access", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryCase, "dictionary: cased/uncased", actions);
        validateAttributeSetting(id, current, next, Attribute::isHuge, "huge", actions);
        validateAttributeSetting(id, current, next, Attribute::densePostingListThreshold, "dense-posting-list-threshold", actions);
        validateAttributeSetting(id, current, next, Attribute::isEnabledOnlyBitVector, "rank: filter", actions);
        validateAttributeSetting(id, current, next, Attribute::distanceMetric, "distance-metric", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::hasHnswIndex, "indexing: index", actions);
        // HNSW parameters can only be compared when both sides actually have an HNSW index.
        if (hasHnswIndex(current) && hasHnswIndex(next)) {
            validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::maxLinksPerNode, "max-links-per-node", actions);
            validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::neighborsToExploreAtInsert, "neighbors-to-explore-at-insert", actions);
        }
    }
    return actions;
}
class AttributeChangeValidator { private final ClusterSpec.Id id; private final AttributeFields currentFields; private final IndexSchema currentIndexSchema; private final NewDocumentType currentDocType; private final AttributeFields nextFields; private final IndexSchema nextIndexSchema; private final NewDocumentType nextDocType; public AttributeChangeValidator(ClusterSpec.Id id, AttributeFields currentFields, IndexSchema currentIndexSchema, NewDocumentType currentDocType, AttributeFields nextFields, IndexSchema nextIndexSchema, NewDocumentType nextDocType) { this.id = id; this.currentFields = currentFields; this.currentIndexSchema = currentIndexSchema; this.currentDocType = currentDocType; this.nextFields = nextFields; this.nextIndexSchema = nextIndexSchema; this.nextDocType = nextDocType; } public List<VespaConfigChangeAction> validate() { List<VespaConfigChangeAction> result = new ArrayList<>(); result.addAll(validateAddAttributeAspect()); result.addAll(validateRemoveAttributeAspect()); result.addAll(validateAttributeSettings()); return result; } private List<VespaConfigChangeAction> validateAddAttributeAspect() { return nextFields.attributes().stream(). map(attr -> attr.getName()). filter(attrName -> !currentFields.containsAttribute(attrName) && currentDocType.containsField(attrName)). map(attrName -> new VespaRestartAction(id, new ChangeMessageBuilder(attrName).addChange("add attribute aspect").build())). collect(Collectors.toList()); } private List<VespaConfigChangeAction> validateRemoveAttributeAspect() { return currentFields.attributes().stream(). map(attr -> attr.getName()). filter(attrName -> !nextFields.containsAttribute(attrName) && nextDocType.containsField(attrName) && !isIndexField(attrName)). map(attrName -> new VespaRestartAction(id, new ChangeMessageBuilder(attrName).addChange("remove attribute aspect").build())). 
collect(Collectors.toList()); } private boolean isIndexField(String fieldName) { return currentIndexSchema.containsField(fieldName) && nextIndexSchema.containsField(fieldName); } private static boolean hasHnswIndex(Attribute attribute) { return attribute.hnswIndexParams().isPresent(); } private static Dictionary.Type extractDictionaryType(Attribute attr) { Dictionary dict = attr.getDictionary(); return dict != null ? dict.getType() : Dictionary.Type.BTREE; } private static Case extractDictionaryCase(Attribute attr) { Dictionary dict = attr.getDictionary(); return dict != null ? dict.getMatch() : Case.UNCASED; } private static void validateAttributeSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Predicate<Attribute> predicate, String setting, List<VespaConfigChangeAction> result) { boolean nextValue = predicate.test(nextAttr); if (predicate.test(currentAttr) != nextValue) { String change = nextValue ? "add" : "remove"; result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(change + " attribute '" + setting + "'").build())); } } private static <T> void validateAttributeSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Function<Attribute, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { T currentValue = settingValueProvider.apply(currentAttr); T nextValue = settingValueProvider.apply(nextAttr); if ( ! 
Objects.equals(currentValue, nextValue)) { String message = String.format("change property '%s' from '%s' to '%s'", setting, currentValue, nextValue); result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); } } private static <T> void validateAttributeHnswIndexSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Function<HnswIndexParams, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { T currentValue = settingValueProvider.apply(currentAttr.hnswIndexParams().get()); T nextValue = settingValueProvider.apply(nextAttr.hnswIndexParams().get()); if (!Objects.equals(currentValue, nextValue)) { String message = String.format("change hnsw index property '%s' from '%s' to '%s'", setting, currentValue, nextValue); result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); } } }
class AttributeChangeValidator { private final ClusterSpec.Id id; private final AttributeFields currentFields; private final IndexSchema currentIndexSchema; private final NewDocumentType currentDocType; private final AttributeFields nextFields; private final IndexSchema nextIndexSchema; private final NewDocumentType nextDocType; public AttributeChangeValidator(ClusterSpec.Id id, AttributeFields currentFields, IndexSchema currentIndexSchema, NewDocumentType currentDocType, AttributeFields nextFields, IndexSchema nextIndexSchema, NewDocumentType nextDocType) { this.id = id; this.currentFields = currentFields; this.currentIndexSchema = currentIndexSchema; this.currentDocType = currentDocType; this.nextFields = nextFields; this.nextIndexSchema = nextIndexSchema; this.nextDocType = nextDocType; } public List<VespaConfigChangeAction> validate() { List<VespaConfigChangeAction> result = new ArrayList<>(); result.addAll(validateAddAttributeAspect()); result.addAll(validateRemoveAttributeAspect()); result.addAll(validateAttributeSettings()); return result; } private List<VespaConfigChangeAction> validateAddAttributeAspect() { return nextFields.attributes().stream(). map(attr -> attr.getName()). filter(attrName -> !currentFields.containsAttribute(attrName) && currentDocType.containsField(attrName)). map(attrName -> new VespaRestartAction(id, new ChangeMessageBuilder(attrName).addChange("add attribute aspect").build())). collect(Collectors.toList()); } private List<VespaConfigChangeAction> validateRemoveAttributeAspect() { return currentFields.attributes().stream(). map(attr -> attr.getName()). filter(attrName -> !nextFields.containsAttribute(attrName) && nextDocType.containsField(attrName) && !isIndexField(attrName)). map(attrName -> new VespaRestartAction(id, new ChangeMessageBuilder(attrName).addChange("remove attribute aspect").build())). 
collect(Collectors.toList()); } private boolean isIndexField(String fieldName) { return currentIndexSchema.containsField(fieldName) && nextIndexSchema.containsField(fieldName); } private static boolean hasHnswIndex(Attribute attribute) { return attribute.hnswIndexParams().isPresent(); } private static Dictionary.Type extractDictionaryType(Attribute attr) { Dictionary dict = attr.getDictionary(); return dict != null ? dict.getType() : Dictionary.Type.BTREE; } private static Case extractDictionaryCase(Attribute attr) { Dictionary dict = attr.getDictionary(); return dict != null ? dict.getMatch() : Case.UNCASED; } private static void validateAttributeSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Predicate<Attribute> predicate, String setting, List<VespaConfigChangeAction> result) { boolean nextValue = predicate.test(nextAttr); if (predicate.test(currentAttr) != nextValue) { String change = nextValue ? "add" : "remove"; result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(change + " attribute '" + setting + "'").build())); } } private static <T> void validateAttributeSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Function<Attribute, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { T currentValue = settingValueProvider.apply(currentAttr); T nextValue = settingValueProvider.apply(nextAttr); if ( ! 
Objects.equals(currentValue, nextValue)) { String message = String.format("change property '%s' from '%s' to '%s'", setting, currentValue, nextValue); result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); } } private static <T> void validateAttributeHnswIndexSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Function<HnswIndexParams, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { T currentValue = settingValueProvider.apply(currentAttr.hnswIndexParams().get()); T nextValue = settingValueProvider.apply(nextAttr.hnswIndexParams().get()); if (!Objects.equals(currentValue, nextValue)) { String message = String.format("change hnsw index property '%s' from '%s' to '%s'", setting, currentValue, nextValue); result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); } } }
Fixed
/**
 * Compares the settings of each attribute present in both the current and the next
 * model, and returns the restart actions required by any differences.
 */
private List<VespaConfigChangeAction> validateAttributeSettings() {
    List<VespaConfigChangeAction> actions = new ArrayList<>();
    for (Attribute next : nextFields.attributes()) {
        Attribute current = currentFields.getAttribute(next.getName());
        if (current == null) continue; // attribute is new; nothing to compare against
        validateAttributeSetting(id, current, next, Attribute::isFastSearch, "fast-search", actions);
        validateAttributeSetting(id, current, next, Attribute::isFastAccess, "fast-access", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryCase, "dictionary: cased/uncased", actions);
        validateAttributeSetting(id, current, next, Attribute::isHuge, "huge", actions);
        validateAttributeSetting(id, current, next, Attribute::densePostingListThreshold, "dense-posting-list-threshold", actions);
        validateAttributeSetting(id, current, next, Attribute::isEnabledOnlyBitVector, "rank: filter", actions);
        validateAttributeSetting(id, current, next, Attribute::distanceMetric, "distance-metric", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::hasHnswIndex, "indexing: index", actions);
        // HNSW parameters can only be compared when both sides actually have an HNSW index.
        if (hasHnswIndex(current) && hasHnswIndex(next)) {
            validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::maxLinksPerNode, "max-links-per-node", actions);
            validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::neighborsToExploreAtInsert, "neighbors-to-explore-at-insert", actions);
        }
    }
    return actions;
}
validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", result);
/**
 * Compares the settings of each attribute present in both the current and the next
 * model, and returns the restart actions required by any differences.
 */
private List<VespaConfigChangeAction> validateAttributeSettings() {
    List<VespaConfigChangeAction> actions = new ArrayList<>();
    for (Attribute next : nextFields.attributes()) {
        Attribute current = currentFields.getAttribute(next.getName());
        if (current == null) continue; // attribute is new; nothing to compare against
        validateAttributeSetting(id, current, next, Attribute::isFastSearch, "fast-search", actions);
        validateAttributeSetting(id, current, next, Attribute::isFastAccess, "fast-access", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryCase, "dictionary: cased/uncased", actions);
        validateAttributeSetting(id, current, next, Attribute::isHuge, "huge", actions);
        validateAttributeSetting(id, current, next, Attribute::densePostingListThreshold, "dense-posting-list-threshold", actions);
        validateAttributeSetting(id, current, next, Attribute::isEnabledOnlyBitVector, "rank: filter", actions);
        validateAttributeSetting(id, current, next, Attribute::distanceMetric, "distance-metric", actions);
        validateAttributeSetting(id, current, next, AttributeChangeValidator::hasHnswIndex, "indexing: index", actions);
        // HNSW parameters can only be compared when both sides actually have an HNSW index.
        if (hasHnswIndex(current) && hasHnswIndex(next)) {
            validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::maxLinksPerNode, "max-links-per-node", actions);
            validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::neighborsToExploreAtInsert, "neighbors-to-explore-at-insert", actions);
        }
    }
    return actions;
}
class AttributeChangeValidator { private final ClusterSpec.Id id; private final AttributeFields currentFields; private final IndexSchema currentIndexSchema; private final NewDocumentType currentDocType; private final AttributeFields nextFields; private final IndexSchema nextIndexSchema; private final NewDocumentType nextDocType; public AttributeChangeValidator(ClusterSpec.Id id, AttributeFields currentFields, IndexSchema currentIndexSchema, NewDocumentType currentDocType, AttributeFields nextFields, IndexSchema nextIndexSchema, NewDocumentType nextDocType) { this.id = id; this.currentFields = currentFields; this.currentIndexSchema = currentIndexSchema; this.currentDocType = currentDocType; this.nextFields = nextFields; this.nextIndexSchema = nextIndexSchema; this.nextDocType = nextDocType; } public List<VespaConfigChangeAction> validate() { List<VespaConfigChangeAction> result = new ArrayList<>(); result.addAll(validateAddAttributeAspect()); result.addAll(validateRemoveAttributeAspect()); result.addAll(validateAttributeSettings()); return result; } private List<VespaConfigChangeAction> validateAddAttributeAspect() { return nextFields.attributes().stream(). map(attr -> attr.getName()). filter(attrName -> !currentFields.containsAttribute(attrName) && currentDocType.containsField(attrName)). map(attrName -> new VespaRestartAction(id, new ChangeMessageBuilder(attrName).addChange("add attribute aspect").build())). collect(Collectors.toList()); } private List<VespaConfigChangeAction> validateRemoveAttributeAspect() { return currentFields.attributes().stream(). map(attr -> attr.getName()). filter(attrName -> !nextFields.containsAttribute(attrName) && nextDocType.containsField(attrName) && !isIndexField(attrName)). map(attrName -> new VespaRestartAction(id, new ChangeMessageBuilder(attrName).addChange("remove attribute aspect").build())). 
collect(Collectors.toList()); } private boolean isIndexField(String fieldName) { return currentIndexSchema.containsField(fieldName) && nextIndexSchema.containsField(fieldName); } private static boolean hasHnswIndex(Attribute attribute) { return attribute.hnswIndexParams().isPresent(); } private static Dictionary.Type extractDictionaryType(Attribute attr) { Dictionary dict = attr.getDictionary(); return dict != null ? dict.getType() : Dictionary.Type.BTREE; } private static Case extractDictionaryCase(Attribute attr) { Dictionary dict = attr.getDictionary(); return dict != null ? dict.getMatch() : Case.UNCASED; } private static void validateAttributeSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Predicate<Attribute> predicate, String setting, List<VespaConfigChangeAction> result) { boolean nextValue = predicate.test(nextAttr); if (predicate.test(currentAttr) != nextValue) { String change = nextValue ? "add" : "remove"; result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(change + " attribute '" + setting + "'").build())); } } private static <T> void validateAttributeSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Function<Attribute, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { T currentValue = settingValueProvider.apply(currentAttr); T nextValue = settingValueProvider.apply(nextAttr); if ( ! 
Objects.equals(currentValue, nextValue)) { String message = String.format("change property '%s' from '%s' to '%s'", setting, currentValue, nextValue); result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); } } private static <T> void validateAttributeHnswIndexSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Function<HnswIndexParams, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { T currentValue = settingValueProvider.apply(currentAttr.hnswIndexParams().get()); T nextValue = settingValueProvider.apply(nextAttr.hnswIndexParams().get()); if (!Objects.equals(currentValue, nextValue)) { String message = String.format("change hnsw index property '%s' from '%s' to '%s'", setting, currentValue, nextValue); result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); } } }
class AttributeChangeValidator { private final ClusterSpec.Id id; private final AttributeFields currentFields; private final IndexSchema currentIndexSchema; private final NewDocumentType currentDocType; private final AttributeFields nextFields; private final IndexSchema nextIndexSchema; private final NewDocumentType nextDocType; public AttributeChangeValidator(ClusterSpec.Id id, AttributeFields currentFields, IndexSchema currentIndexSchema, NewDocumentType currentDocType, AttributeFields nextFields, IndexSchema nextIndexSchema, NewDocumentType nextDocType) { this.id = id; this.currentFields = currentFields; this.currentIndexSchema = currentIndexSchema; this.currentDocType = currentDocType; this.nextFields = nextFields; this.nextIndexSchema = nextIndexSchema; this.nextDocType = nextDocType; } public List<VespaConfigChangeAction> validate() { List<VespaConfigChangeAction> result = new ArrayList<>(); result.addAll(validateAddAttributeAspect()); result.addAll(validateRemoveAttributeAspect()); result.addAll(validateAttributeSettings()); return result; } private List<VespaConfigChangeAction> validateAddAttributeAspect() { return nextFields.attributes().stream(). map(attr -> attr.getName()). filter(attrName -> !currentFields.containsAttribute(attrName) && currentDocType.containsField(attrName)). map(attrName -> new VespaRestartAction(id, new ChangeMessageBuilder(attrName).addChange("add attribute aspect").build())). collect(Collectors.toList()); } private List<VespaConfigChangeAction> validateRemoveAttributeAspect() { return currentFields.attributes().stream(). map(attr -> attr.getName()). filter(attrName -> !nextFields.containsAttribute(attrName) && nextDocType.containsField(attrName) && !isIndexField(attrName)). map(attrName -> new VespaRestartAction(id, new ChangeMessageBuilder(attrName).addChange("remove attribute aspect").build())). 
collect(Collectors.toList()); } private boolean isIndexField(String fieldName) { return currentIndexSchema.containsField(fieldName) && nextIndexSchema.containsField(fieldName); } private static boolean hasHnswIndex(Attribute attribute) { return attribute.hnswIndexParams().isPresent(); } private static Dictionary.Type extractDictionaryType(Attribute attr) { Dictionary dict = attr.getDictionary(); return dict != null ? dict.getType() : Dictionary.Type.BTREE; } private static Case extractDictionaryCase(Attribute attr) { Dictionary dict = attr.getDictionary(); return dict != null ? dict.getMatch() : Case.UNCASED; } private static void validateAttributeSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Predicate<Attribute> predicate, String setting, List<VespaConfigChangeAction> result) { boolean nextValue = predicate.test(nextAttr); if (predicate.test(currentAttr) != nextValue) { String change = nextValue ? "add" : "remove"; result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(change + " attribute '" + setting + "'").build())); } } private static <T> void validateAttributeSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Function<Attribute, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { T currentValue = settingValueProvider.apply(currentAttr); T nextValue = settingValueProvider.apply(nextAttr); if ( ! 
Objects.equals(currentValue, nextValue)) { String message = String.format("change property '%s' from '%s' to '%s'", setting, currentValue, nextValue); result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); } } private static <T> void validateAttributeHnswIndexSetting(ClusterSpec.Id id, Attribute currentAttr, Attribute nextAttr, Function<HnswIndexParams, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { T currentValue = settingValueProvider.apply(currentAttr.hnswIndexParams().get()); T nextValue = settingValueProvider.apply(nextAttr.hnswIndexParams().get()); if (!Objects.equals(currentValue, nextValue)) { String message = String.format("change hnsw index property '%s' from '%s' to '%s'", setting, currentValue, nextValue); result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); } } }
It looks like this should be moved some lines earlier, and then `combinedValueType` should be computed from `concatType`.
/**
 * Evaluates both arguments and concatenates them along the configured dimension.
 *
 * @param context the evaluation context providing bound values
 * @return the concatenated tensor, of the type resolved by {@code TypeResolver.concat}
 */
public Tensor evaluate(EvaluationContext<NAMETYPE> context) {
    Tensor a = argumentA.evaluate(context);
    Tensor b = argumentB.evaluate(context);
    // Resolve the result type FIRST, and take the value type from it, so the value type
    // used when extending the arguments is the one the type resolver actually decided on.
    // Previously TensorType.combinedValueType was computed separately before the concat
    // type was known, which could disagree with the resolved type.
    TensorType concatType = TypeResolver.concat(a.type(), b.type(), dimension);
    a = ensureIndexedDimension(dimension, a, concatType.valueType());
    b = ensureIndexedDimension(dimension, b, concatType.valueType());

    IndexedTensor aIndexed = (IndexedTensor) a; // ensureIndexedDimension guarantees indexed tensors
    IndexedTensor bIndexed = (IndexedTensor) b;
    DimensionSizes concatSize = concatSize(concatType, aIndexed, bIndexed, dimension);

    Tensor.Builder builder = Tensor.Builder.of(concatType, concatSize);
    // b's cells are placed after a's along the concat dimension, so the offset is a's length in it.
    long aDimensionLength = aIndexed.type().indexOfDimension(dimension)
                                    .map(d -> aIndexed.dimensionSizes().size(d))
                                    .orElseThrow(RuntimeException::new);
    int[] aToIndexes = mapIndexes(a.type(), concatType);
    int[] bToIndexes = mapIndexes(b.type(), concatType);
    concatenateTo(aIndexed, bIndexed, aDimensionLength, concatType, aToIndexes, bToIndexes, builder);
    concatenateTo(bIndexed, aIndexed, 0, concatType, bToIndexes, aToIndexes, builder);
    return builder.build();
}
TensorType concatType = TypeResolver.concat(a.type(), b.type(), dimension);
/**
 * Evaluates both arguments and concatenates them along the configured dimension.
 * The result type is resolved first, and its value type governs how missing
 * concat dimensions are added to the arguments.
 */
public Tensor evaluate(EvaluationContext<NAMETYPE> context) {
    Tensor left = argumentA.evaluate(context);
    Tensor right = argumentB.evaluate(context);
    TensorType resultType = TypeResolver.concat(left.type(), right.type(), dimension);
    left = ensureIndexedDimension(dimension, left, resultType.valueType());
    right = ensureIndexedDimension(dimension, right, resultType.valueType());

    IndexedTensor leftIndexed = (IndexedTensor) left;
    IndexedTensor rightIndexed = (IndexedTensor) right;
    Tensor.Builder builder = Tensor.Builder.of(resultType, concatSize(resultType, leftIndexed, rightIndexed, dimension));

    // The right argument's cells are placed after the left's along the concat dimension.
    long offset = leftIndexed.type().indexOfDimension(dimension)
                             .map(d -> leftIndexed.dimensionSizes().size(d))
                             .orElseThrow(RuntimeException::new);
    int[] leftToResult = mapIndexes(left.type(), resultType);
    int[] rightToResult = mapIndexes(right.type(), resultType);
    concatenateTo(leftIndexed, rightIndexed, offset, resultType, leftToResult, rightToResult, builder);
    concatenateTo(rightIndexed, leftIndexed, 0, resultType, rightToResult, leftToResult, builder);
    return builder.build();
}
class Concat<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYPE> { private final TensorFunction<NAMETYPE> argumentA, argumentB; private final String dimension; public Concat(TensorFunction<NAMETYPE> argumentA, TensorFunction<NAMETYPE> argumentB, String dimension) { Objects.requireNonNull(argumentA, "The first argument tensor cannot be null"); Objects.requireNonNull(argumentB, "The second argument tensor cannot be null"); Objects.requireNonNull(dimension, "The dimension cannot be null"); this.argumentA = argumentA; this.argumentB = argumentB; this.dimension = dimension; } @Override public List<TensorFunction<NAMETYPE>> arguments() { return ImmutableList.of(argumentA, argumentB); } @Override public TensorFunction<NAMETYPE> withArguments(List<TensorFunction<NAMETYPE>> arguments) { if (arguments.size() != 2) throw new IllegalArgumentException("Concat must have 2 arguments, got " + arguments.size()); return new Concat<>(arguments.get(0), arguments.get(1), dimension); } @Override public PrimitiveTensorFunction<NAMETYPE> toPrimitive() { return new Concat<>(argumentA.toPrimitive(), argumentB.toPrimitive(), dimension); } @Override public String toString(ToStringContext context) { return "concat(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + dimension + ")"; } @Override public TensorType type(TypeContext<NAMETYPE> context) { return TypeResolver.concat(argumentA.type(context), argumentB.type(context), dimension); } @Override private void concatenateTo(IndexedTensor a, IndexedTensor b, long offset, TensorType concatType, int[] aToIndexes, int[] bToIndexes, Tensor.Builder builder) { Set<String> otherADimensions = a.type().dimensionNames().stream().filter(d -> !d.equals(dimension)).collect(Collectors.toSet()); for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(otherADimensions); ia.hasNext();) { IndexedTensor.SubspaceIterator iaSubspace = ia.next(); TensorAddress aAddress = iaSubspace.address(); for 
(Iterator<IndexedTensor.SubspaceIterator> ib = b.subspaceIterator(otherADimensions); ib.hasNext();) { IndexedTensor.SubspaceIterator ibSubspace = ib.next(); while (ibSubspace.hasNext()) { Tensor.Cell bCell = ibSubspace.next(); TensorAddress combinedAddress = combineAddresses(aAddress, aToIndexes, bCell.getKey(), bToIndexes, concatType, offset, dimension); if (combinedAddress == null) continue; builder.cell(combinedAddress, bCell.getValue()); } iaSubspace.reset(); } } } private Tensor ensureIndexedDimension(String dimensionName, Tensor tensor, TensorType.Value combinedValueType) { Optional<TensorType.Dimension> dimension = tensor.type().dimension(dimensionName); if ( dimension.isPresent() ) { if ( ! dimension.get().isIndexed()) throw new IllegalArgumentException("Concat in dimension '" + dimensionName + "' requires that dimension to be indexed or absent, " + "but got a tensor with type " + tensor.type()); return tensor; } else { if (tensor.type().dimensions().stream().anyMatch(d -> ! d.isIndexed())) throw new IllegalArgumentException("Concat requires an indexed tensor, " + "but got a tensor with type " + tensor.type()); Tensor unitTensor = Tensor.Builder.of(new TensorType.Builder(combinedValueType) .indexed(dimensionName, 1) .build()) .cell(1,0) .build(); return tensor.multiply(unitTensor); } } /** Returns the concrete (not type) dimension sizes resulting from combining a and b */ private DimensionSizes concatSize(TensorType concatType, IndexedTensor a, IndexedTensor b, String concatDimension) { DimensionSizes.Builder concatSizes = new DimensionSizes.Builder(concatType.dimensions().size()); for (int i = 0; i < concatSizes.dimensions(); i++) { String currentDimension = concatType.dimensions().get(i).name(); long aSize = a.type().indexOfDimension(currentDimension).map(d -> a.dimensionSizes().size(d)).orElse(0L); long bSize = b.type().indexOfDimension(currentDimension).map(d -> b.dimensionSizes().size(d)).orElse(0L); if (currentDimension.equals(concatDimension)) 
concatSizes.set(i, aSize + bSize); else if (aSize != 0 && bSize != 0 && aSize!=bSize ) concatSizes.set(i, Math.min(aSize, bSize)); else concatSizes.set(i, Math.max(aSize, bSize)); } return concatSizes.build(); } /** * Combine two addresses, adding the offset to the concat dimension * * @return the combined address or null if the addresses are incompatible * (in some other dimension than the concat dimension) */ private TensorAddress combineAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes, TensorType concatType, long concatOffset, String concatDimension) { long[] combinedLabels = new long[concatType.dimensions().size()]; Arrays.fill(combinedLabels, -1); int concatDimensionIndex = concatType.indexOfDimension(concatDimension).get(); mapContent(a, combinedLabels, aToIndexes, concatDimensionIndex, concatOffset); boolean compatible = mapContent(b, combinedLabels, bToIndexes, concatDimensionIndex, concatOffset); if ( ! compatible) return null; return TensorAddress.of(combinedLabels); } /** * Returns the an array having one entry in order for each dimension of fromType * containing the index at which toType contains the same dimension name. * That is, if the returned array contains n at index i then * fromType.dimensions().get(i).name.equals(toType.dimensions().get(n).name()) * If some dimension in fromType is not present in toType, the corresponding index will be -1 */ private int[] mapIndexes(TensorType fromType, TensorType toType) { int[] toIndexes = new int[fromType.dimensions().size()]; for (int i = 0; i < fromType.dimensions().size(); i++) toIndexes[i] = toType.indexOfDimension(fromType.dimensions().get(i).name()).orElse(-1); return toIndexes; } /** * Maps the content in the given list to the given array, using the given index map. 
* * @return true if the mapping was successful, false if one of the destination positions was * occupied by a different value */ private boolean mapContent(TensorAddress from, long[] to, int[] indexMap, int concatDimension, long concatOffset) { for (int i = 0; i < from.size(); i++) { int toIndex = indexMap[i]; if (concatDimension == toIndex) { to[toIndex] = from.numericLabel(i) + concatOffset; } else { if (to[toIndex] != -1 && to[toIndex] != from.numericLabel(i)) return false; to[toIndex] = from.numericLabel(i); } } return true; } }
class Concat<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYPE> { private final TensorFunction<NAMETYPE> argumentA, argumentB; private final String dimension; public Concat(TensorFunction<NAMETYPE> argumentA, TensorFunction<NAMETYPE> argumentB, String dimension) { Objects.requireNonNull(argumentA, "The first argument tensor cannot be null"); Objects.requireNonNull(argumentB, "The second argument tensor cannot be null"); Objects.requireNonNull(dimension, "The dimension cannot be null"); this.argumentA = argumentA; this.argumentB = argumentB; this.dimension = dimension; } @Override public List<TensorFunction<NAMETYPE>> arguments() { return ImmutableList.of(argumentA, argumentB); } @Override public TensorFunction<NAMETYPE> withArguments(List<TensorFunction<NAMETYPE>> arguments) { if (arguments.size() != 2) throw new IllegalArgumentException("Concat must have 2 arguments, got " + arguments.size()); return new Concat<>(arguments.get(0), arguments.get(1), dimension); } @Override public PrimitiveTensorFunction<NAMETYPE> toPrimitive() { return new Concat<>(argumentA.toPrimitive(), argumentB.toPrimitive(), dimension); } @Override public String toString(ToStringContext context) { return "concat(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + dimension + ")"; } @Override public TensorType type(TypeContext<NAMETYPE> context) { return TypeResolver.concat(argumentA.type(context), argumentB.type(context), dimension); } @Override private void concatenateTo(IndexedTensor a, IndexedTensor b, long offset, TensorType concatType, int[] aToIndexes, int[] bToIndexes, Tensor.Builder builder) { Set<String> otherADimensions = a.type().dimensionNames().stream().filter(d -> !d.equals(dimension)).collect(Collectors.toSet()); for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(otherADimensions); ia.hasNext();) { IndexedTensor.SubspaceIterator iaSubspace = ia.next(); TensorAddress aAddress = iaSubspace.address(); for 
(Iterator<IndexedTensor.SubspaceIterator> ib = b.subspaceIterator(otherADimensions); ib.hasNext();) { IndexedTensor.SubspaceIterator ibSubspace = ib.next(); while (ibSubspace.hasNext()) { Tensor.Cell bCell = ibSubspace.next(); TensorAddress combinedAddress = combineAddresses(aAddress, aToIndexes, bCell.getKey(), bToIndexes, concatType, offset, dimension); if (combinedAddress == null) continue; builder.cell(combinedAddress, bCell.getValue()); } iaSubspace.reset(); } } } private Tensor ensureIndexedDimension(String dimensionName, Tensor tensor, TensorType.Value combinedValueType) { Optional<TensorType.Dimension> dimension = tensor.type().dimension(dimensionName); if ( dimension.isPresent() ) { if ( ! dimension.get().isIndexed()) throw new IllegalArgumentException("Concat in dimension '" + dimensionName + "' requires that dimension to be indexed or absent, " + "but got a tensor with type " + tensor.type()); return tensor; } else { if (tensor.type().dimensions().stream().anyMatch(d -> ! d.isIndexed())) throw new IllegalArgumentException("Concat requires an indexed tensor, " + "but got a tensor with type " + tensor.type()); Tensor unitTensor = Tensor.Builder.of(new TensorType.Builder(combinedValueType) .indexed(dimensionName, 1) .build()) .cell(1,0) .build(); return tensor.multiply(unitTensor); } } /** Returns the concrete (not type) dimension sizes resulting from combining a and b */ private DimensionSizes concatSize(TensorType concatType, IndexedTensor a, IndexedTensor b, String concatDimension) { DimensionSizes.Builder concatSizes = new DimensionSizes.Builder(concatType.dimensions().size()); for (int i = 0; i < concatSizes.dimensions(); i++) { String currentDimension = concatType.dimensions().get(i).name(); long aSize = a.type().indexOfDimension(currentDimension).map(d -> a.dimensionSizes().size(d)).orElse(0L); long bSize = b.type().indexOfDimension(currentDimension).map(d -> b.dimensionSizes().size(d)).orElse(0L); if (currentDimension.equals(concatDimension)) 
concatSizes.set(i, aSize + bSize); else if (aSize != 0 && bSize != 0 && aSize!=bSize ) concatSizes.set(i, Math.min(aSize, bSize)); else concatSizes.set(i, Math.max(aSize, bSize)); } return concatSizes.build(); } /** * Combine two addresses, adding the offset to the concat dimension * * @return the combined address or null if the addresses are incompatible * (in some other dimension than the concat dimension) */ private TensorAddress combineAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes, TensorType concatType, long concatOffset, String concatDimension) { long[] combinedLabels = new long[concatType.dimensions().size()]; Arrays.fill(combinedLabels, -1); int concatDimensionIndex = concatType.indexOfDimension(concatDimension).get(); mapContent(a, combinedLabels, aToIndexes, concatDimensionIndex, concatOffset); boolean compatible = mapContent(b, combinedLabels, bToIndexes, concatDimensionIndex, concatOffset); if ( ! compatible) return null; return TensorAddress.of(combinedLabels); } /** * Returns the an array having one entry in order for each dimension of fromType * containing the index at which toType contains the same dimension name. * That is, if the returned array contains n at index i then * fromType.dimensions().get(i).name.equals(toType.dimensions().get(n).name()) * If some dimension in fromType is not present in toType, the corresponding index will be -1 */ private int[] mapIndexes(TensorType fromType, TensorType toType) { int[] toIndexes = new int[fromType.dimensions().size()]; for (int i = 0; i < fromType.dimensions().size(); i++) toIndexes[i] = toType.indexOfDimension(fromType.dimensions().get(i).name()).orElse(-1); return toIndexes; } /** * Maps the content in the given list to the given array, using the given index map. 
* * @return true if the mapping was successful, false if one of the destination positions was * occupied by a different value */ private boolean mapContent(TensorAddress from, long[] to, int[] indexMap, int concatDimension, long concatOffset) { for (int i = 0; i < from.size(); i++) { int toIndex = indexMap[i]; if (concatDimension == toIndex) { to[toIndex] = from.numericLabel(i) + concatOffset; } else { if (to[toIndex] != -1 && to[toIndex] != from.numericLabel(i)) return false; to[toIndex] = from.numericLabel(i); } } return true; } }
Thanks!
public Tensor evaluate(EvaluationContext<NAMETYPE> context) { Tensor a = argumentA.evaluate(context); Tensor b = argumentB.evaluate(context); TensorType.Value combinedValueType = TensorType.combinedValueType(a.type(), b.type()); a = ensureIndexedDimension(dimension, a, combinedValueType); b = ensureIndexedDimension(dimension, b, combinedValueType); IndexedTensor aIndexed = (IndexedTensor) a; IndexedTensor bIndexed = (IndexedTensor) b; TensorType concatType = TypeResolver.concat(a.type(), b.type(), dimension); DimensionSizes concatSize = concatSize(concatType, aIndexed, bIndexed, dimension); Tensor.Builder builder = Tensor.Builder.of(concatType, concatSize); long aDimensionLength = aIndexed.type().indexOfDimension(dimension).map(d -> aIndexed.dimensionSizes().size(d)).orElseThrow(RuntimeException::new); int[] aToIndexes = mapIndexes(a.type(), concatType); int[] bToIndexes = mapIndexes(b.type(), concatType); concatenateTo(aIndexed, bIndexed, aDimensionLength, concatType, aToIndexes, bToIndexes, builder); concatenateTo(bIndexed, aIndexed, 0, concatType, bToIndexes, aToIndexes, builder); return builder.build(); }
TensorType concatType = TypeResolver.concat(a.type(), b.type(), dimension);
public Tensor evaluate(EvaluationContext<NAMETYPE> context) { Tensor a = argumentA.evaluate(context); Tensor b = argumentB.evaluate(context); TensorType concatType = TypeResolver.concat(a.type(), b.type(), dimension); a = ensureIndexedDimension(dimension, a, concatType.valueType()); b = ensureIndexedDimension(dimension, b, concatType.valueType()); IndexedTensor aIndexed = (IndexedTensor) a; IndexedTensor bIndexed = (IndexedTensor) b; DimensionSizes concatSize = concatSize(concatType, aIndexed, bIndexed, dimension); Tensor.Builder builder = Tensor.Builder.of(concatType, concatSize); long aDimensionLength = aIndexed.type().indexOfDimension(dimension).map(d -> aIndexed.dimensionSizes().size(d)).orElseThrow(RuntimeException::new); int[] aToIndexes = mapIndexes(a.type(), concatType); int[] bToIndexes = mapIndexes(b.type(), concatType); concatenateTo(aIndexed, bIndexed, aDimensionLength, concatType, aToIndexes, bToIndexes, builder); concatenateTo(bIndexed, aIndexed, 0, concatType, bToIndexes, aToIndexes, builder); return builder.build(); }
class Concat<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYPE> { private final TensorFunction<NAMETYPE> argumentA, argumentB; private final String dimension; public Concat(TensorFunction<NAMETYPE> argumentA, TensorFunction<NAMETYPE> argumentB, String dimension) { Objects.requireNonNull(argumentA, "The first argument tensor cannot be null"); Objects.requireNonNull(argumentB, "The second argument tensor cannot be null"); Objects.requireNonNull(dimension, "The dimension cannot be null"); this.argumentA = argumentA; this.argumentB = argumentB; this.dimension = dimension; } @Override public List<TensorFunction<NAMETYPE>> arguments() { return ImmutableList.of(argumentA, argumentB); } @Override public TensorFunction<NAMETYPE> withArguments(List<TensorFunction<NAMETYPE>> arguments) { if (arguments.size() != 2) throw new IllegalArgumentException("Concat must have 2 arguments, got " + arguments.size()); return new Concat<>(arguments.get(0), arguments.get(1), dimension); } @Override public PrimitiveTensorFunction<NAMETYPE> toPrimitive() { return new Concat<>(argumentA.toPrimitive(), argumentB.toPrimitive(), dimension); } @Override public String toString(ToStringContext context) { return "concat(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + dimension + ")"; } @Override public TensorType type(TypeContext<NAMETYPE> context) { return TypeResolver.concat(argumentA.type(context), argumentB.type(context), dimension); } @Override private void concatenateTo(IndexedTensor a, IndexedTensor b, long offset, TensorType concatType, int[] aToIndexes, int[] bToIndexes, Tensor.Builder builder) { Set<String> otherADimensions = a.type().dimensionNames().stream().filter(d -> !d.equals(dimension)).collect(Collectors.toSet()); for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(otherADimensions); ia.hasNext();) { IndexedTensor.SubspaceIterator iaSubspace = ia.next(); TensorAddress aAddress = iaSubspace.address(); for 
(Iterator<IndexedTensor.SubspaceIterator> ib = b.subspaceIterator(otherADimensions); ib.hasNext();) { IndexedTensor.SubspaceIterator ibSubspace = ib.next(); while (ibSubspace.hasNext()) { Tensor.Cell bCell = ibSubspace.next(); TensorAddress combinedAddress = combineAddresses(aAddress, aToIndexes, bCell.getKey(), bToIndexes, concatType, offset, dimension); if (combinedAddress == null) continue; builder.cell(combinedAddress, bCell.getValue()); } iaSubspace.reset(); } } } private Tensor ensureIndexedDimension(String dimensionName, Tensor tensor, TensorType.Value combinedValueType) { Optional<TensorType.Dimension> dimension = tensor.type().dimension(dimensionName); if ( dimension.isPresent() ) { if ( ! dimension.get().isIndexed()) throw new IllegalArgumentException("Concat in dimension '" + dimensionName + "' requires that dimension to be indexed or absent, " + "but got a tensor with type " + tensor.type()); return tensor; } else { if (tensor.type().dimensions().stream().anyMatch(d -> ! d.isIndexed())) throw new IllegalArgumentException("Concat requires an indexed tensor, " + "but got a tensor with type " + tensor.type()); Tensor unitTensor = Tensor.Builder.of(new TensorType.Builder(combinedValueType) .indexed(dimensionName, 1) .build()) .cell(1,0) .build(); return tensor.multiply(unitTensor); } } /** Returns the concrete (not type) dimension sizes resulting from combining a and b */ private DimensionSizes concatSize(TensorType concatType, IndexedTensor a, IndexedTensor b, String concatDimension) { DimensionSizes.Builder concatSizes = new DimensionSizes.Builder(concatType.dimensions().size()); for (int i = 0; i < concatSizes.dimensions(); i++) { String currentDimension = concatType.dimensions().get(i).name(); long aSize = a.type().indexOfDimension(currentDimension).map(d -> a.dimensionSizes().size(d)).orElse(0L); long bSize = b.type().indexOfDimension(currentDimension).map(d -> b.dimensionSizes().size(d)).orElse(0L); if (currentDimension.equals(concatDimension)) 
concatSizes.set(i, aSize + bSize); else if (aSize != 0 && bSize != 0 && aSize!=bSize ) concatSizes.set(i, Math.min(aSize, bSize)); else concatSizes.set(i, Math.max(aSize, bSize)); } return concatSizes.build(); } /** * Combine two addresses, adding the offset to the concat dimension * * @return the combined address or null if the addresses are incompatible * (in some other dimension than the concat dimension) */ private TensorAddress combineAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes, TensorType concatType, long concatOffset, String concatDimension) { long[] combinedLabels = new long[concatType.dimensions().size()]; Arrays.fill(combinedLabels, -1); int concatDimensionIndex = concatType.indexOfDimension(concatDimension).get(); mapContent(a, combinedLabels, aToIndexes, concatDimensionIndex, concatOffset); boolean compatible = mapContent(b, combinedLabels, bToIndexes, concatDimensionIndex, concatOffset); if ( ! compatible) return null; return TensorAddress.of(combinedLabels); } /** * Returns the an array having one entry in order for each dimension of fromType * containing the index at which toType contains the same dimension name. * That is, if the returned array contains n at index i then * fromType.dimensions().get(i).name.equals(toType.dimensions().get(n).name()) * If some dimension in fromType is not present in toType, the corresponding index will be -1 */ private int[] mapIndexes(TensorType fromType, TensorType toType) { int[] toIndexes = new int[fromType.dimensions().size()]; for (int i = 0; i < fromType.dimensions().size(); i++) toIndexes[i] = toType.indexOfDimension(fromType.dimensions().get(i).name()).orElse(-1); return toIndexes; } /** * Maps the content in the given list to the given array, using the given index map. 
* * @return true if the mapping was successful, false if one of the destination positions was * occupied by a different value */ private boolean mapContent(TensorAddress from, long[] to, int[] indexMap, int concatDimension, long concatOffset) { for (int i = 0; i < from.size(); i++) { int toIndex = indexMap[i]; if (concatDimension == toIndex) { to[toIndex] = from.numericLabel(i) + concatOffset; } else { if (to[toIndex] != -1 && to[toIndex] != from.numericLabel(i)) return false; to[toIndex] = from.numericLabel(i); } } return true; } }
class Concat<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYPE> { private final TensorFunction<NAMETYPE> argumentA, argumentB; private final String dimension; public Concat(TensorFunction<NAMETYPE> argumentA, TensorFunction<NAMETYPE> argumentB, String dimension) { Objects.requireNonNull(argumentA, "The first argument tensor cannot be null"); Objects.requireNonNull(argumentB, "The second argument tensor cannot be null"); Objects.requireNonNull(dimension, "The dimension cannot be null"); this.argumentA = argumentA; this.argumentB = argumentB; this.dimension = dimension; } @Override public List<TensorFunction<NAMETYPE>> arguments() { return ImmutableList.of(argumentA, argumentB); } @Override public TensorFunction<NAMETYPE> withArguments(List<TensorFunction<NAMETYPE>> arguments) { if (arguments.size() != 2) throw new IllegalArgumentException("Concat must have 2 arguments, got " + arguments.size()); return new Concat<>(arguments.get(0), arguments.get(1), dimension); } @Override public PrimitiveTensorFunction<NAMETYPE> toPrimitive() { return new Concat<>(argumentA.toPrimitive(), argumentB.toPrimitive(), dimension); } @Override public String toString(ToStringContext context) { return "concat(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + dimension + ")"; } @Override public TensorType type(TypeContext<NAMETYPE> context) { return TypeResolver.concat(argumentA.type(context), argumentB.type(context), dimension); } @Override private void concatenateTo(IndexedTensor a, IndexedTensor b, long offset, TensorType concatType, int[] aToIndexes, int[] bToIndexes, Tensor.Builder builder) { Set<String> otherADimensions = a.type().dimensionNames().stream().filter(d -> !d.equals(dimension)).collect(Collectors.toSet()); for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(otherADimensions); ia.hasNext();) { IndexedTensor.SubspaceIterator iaSubspace = ia.next(); TensorAddress aAddress = iaSubspace.address(); for 
(Iterator<IndexedTensor.SubspaceIterator> ib = b.subspaceIterator(otherADimensions); ib.hasNext();) { IndexedTensor.SubspaceIterator ibSubspace = ib.next(); while (ibSubspace.hasNext()) { Tensor.Cell bCell = ibSubspace.next(); TensorAddress combinedAddress = combineAddresses(aAddress, aToIndexes, bCell.getKey(), bToIndexes, concatType, offset, dimension); if (combinedAddress == null) continue; builder.cell(combinedAddress, bCell.getValue()); } iaSubspace.reset(); } } } private Tensor ensureIndexedDimension(String dimensionName, Tensor tensor, TensorType.Value combinedValueType) { Optional<TensorType.Dimension> dimension = tensor.type().dimension(dimensionName); if ( dimension.isPresent() ) { if ( ! dimension.get().isIndexed()) throw new IllegalArgumentException("Concat in dimension '" + dimensionName + "' requires that dimension to be indexed or absent, " + "but got a tensor with type " + tensor.type()); return tensor; } else { if (tensor.type().dimensions().stream().anyMatch(d -> ! d.isIndexed())) throw new IllegalArgumentException("Concat requires an indexed tensor, " + "but got a tensor with type " + tensor.type()); Tensor unitTensor = Tensor.Builder.of(new TensorType.Builder(combinedValueType) .indexed(dimensionName, 1) .build()) .cell(1,0) .build(); return tensor.multiply(unitTensor); } } /** Returns the concrete (not type) dimension sizes resulting from combining a and b */ private DimensionSizes concatSize(TensorType concatType, IndexedTensor a, IndexedTensor b, String concatDimension) { DimensionSizes.Builder concatSizes = new DimensionSizes.Builder(concatType.dimensions().size()); for (int i = 0; i < concatSizes.dimensions(); i++) { String currentDimension = concatType.dimensions().get(i).name(); long aSize = a.type().indexOfDimension(currentDimension).map(d -> a.dimensionSizes().size(d)).orElse(0L); long bSize = b.type().indexOfDimension(currentDimension).map(d -> b.dimensionSizes().size(d)).orElse(0L); if (currentDimension.equals(concatDimension)) 
concatSizes.set(i, aSize + bSize); else if (aSize != 0 && bSize != 0 && aSize!=bSize ) concatSizes.set(i, Math.min(aSize, bSize)); else concatSizes.set(i, Math.max(aSize, bSize)); } return concatSizes.build(); } /** * Combine two addresses, adding the offset to the concat dimension * * @return the combined address or null if the addresses are incompatible * (in some other dimension than the concat dimension) */ private TensorAddress combineAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes, TensorType concatType, long concatOffset, String concatDimension) { long[] combinedLabels = new long[concatType.dimensions().size()]; Arrays.fill(combinedLabels, -1); int concatDimensionIndex = concatType.indexOfDimension(concatDimension).get(); mapContent(a, combinedLabels, aToIndexes, concatDimensionIndex, concatOffset); boolean compatible = mapContent(b, combinedLabels, bToIndexes, concatDimensionIndex, concatOffset); if ( ! compatible) return null; return TensorAddress.of(combinedLabels); } /** * Returns the an array having one entry in order for each dimension of fromType * containing the index at which toType contains the same dimension name. * That is, if the returned array contains n at index i then * fromType.dimensions().get(i).name.equals(toType.dimensions().get(n).name()) * If some dimension in fromType is not present in toType, the corresponding index will be -1 */ private int[] mapIndexes(TensorType fromType, TensorType toType) { int[] toIndexes = new int[fromType.dimensions().size()]; for (int i = 0; i < fromType.dimensions().size(); i++) toIndexes[i] = toType.indexOfDimension(fromType.dimensions().get(i).name()).orElse(-1); return toIndexes; } /** * Maps the content in the given list to the given array, using the given index map. 
* * @return true if the mapping was successful, false if one of the destination positions was * occupied by a different value */ private boolean mapContent(TensorAddress from, long[] to, int[] indexMap, int concatDimension, long concatOffset) { for (int i = 0; i < from.size(); i++) { int toIndex = indexMap[i]; if (concatDimension == toIndex) { to[toIndex] = from.numericLabel(i) + concatOffset; } else { if (to[toIndex] != -1 && to[toIndex] != from.numericLabel(i)) return false; to[toIndex] = from.numericLabel(i); } } return true; } }
Unless, of course, the user uses some deprecated feature that is only enabled in a certain environment or region — but it is much harder to aggregate all of that without also duplicating the warnings that are present in all or several deployments. :see_no_evil:
/**
 * Deploys the application of the given job to the job's zone, as part of the currently
 * running run of that job.
 *
 * @param job the job whose application instance and zone to deploy (must not be a tester instance)
 * @param deploySourceVersions whether to deploy the run's source versions, when present,
 *                             instead of its target versions
 * @return the result of activating the deployment
 * @throws IllegalArgumentException if the job belongs to a tester application
 * @throws IllegalStateException if no run is known for the job, or the last run has already ended
 */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
    if (job.application().instance().isTester())
        throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
    ZoneId zone = job.type().zone(controller.system());

    try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
        Set<ContainerEndpoint> containerEndpoints;
        Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
        Optional<TenantRoles> tenantRoles = Optional.empty();

        Run run = controller.jobController().last(job)
                            .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
        if (run.hasEnded())
            throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

        // Use the run's source versions only when explicitly requested, and when present
        Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
        ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
        ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);

        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
            Instance instance = application.get().require(job.application().instance());
            rejectOldChange(instance, platform, revision, job, zone);

            if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

            endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
            containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
        }

        ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
        var quotaUsage = deploymentQuotaUsage(zone, job.application());

        // Collect prepare-log messages at level WARNING or above and reflect them as a notification
        NotificationSource source = zone.environment().isManuallyDeployed() ? NotificationSource.from(job.application()) : NotificationSource.from(applicationId);
        List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                .map(logs -> logs.stream().filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue()).map(log -> log.message).collect(Collectors.toList()))
                .orElseGet(List::of);
        if (warnings.isEmpty())
            controller.notificationsDb().removeNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING);
        else
            // Fix: replace (setNotification) rather than append (addNotification), so that
            // repeated deployments do not accumulate duplicate warning notifications
            controller.notificationsDb().setNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING, warnings);

        lockApplicationOrThrow(applicationId, application ->
                store(application.with(job.application().instance(),
                                       instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(),
                                                                              warningsFrom(result), quotaUsage))));
        return result;
    }
}
/**
 * Deploys the application of the given job to the job's zone, as part of the currently
 * running run of that job.
 *
 * @param job the job whose application instance and zone to deploy (must not be a tester instance)
 * @param deploySourceVersions whether to deploy the run's source versions, when present,
 *                             instead of its target versions
 * @return the result of activating the deployment
 * @throws IllegalArgumentException if the job belongs to a tester application
 * @throws IllegalStateException if no run is known for the job, or the last run has already ended
 */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
    if (job.application().instance().isTester())
        throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
    ZoneId zone = job.type().zone(controller.system());

    // Per-application-and-zone lock held for the whole deployment
    try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
        Set<ContainerEndpoint> containerEndpoints;
        Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
        Optional<TenantRoles> tenantRoles = Optional.empty();

        Run run = controller.jobController().last(job)
                            .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
        if (run.hasEnded())
            throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

        // Use the run's source versions only when explicitly requested, and when present
        Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
        ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
        ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);

        // Inner application lock: read and validate application state, gather endpoint data
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
            Instance instance = application.get().require(job.application().instance());
            rejectOldChange(instance, platform, revision, job, zone);

            if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

            endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));
            containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
        }

        // Application lock is released before the (potentially slow) deploy call
        ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, tenantRoles);
        var quotaUsage = deploymentQuotaUsage(zone, job.application());

        // Collect prepare-log messages at level WARNING or above and reflect them as a
        // notification: set (replace) when any exist, remove when none do
        NotificationSource source = zone.environment().isManuallyDeployed() ? NotificationSource.from(job.application()) : NotificationSource.from(applicationId);
        List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                .map(logs -> logs.stream().filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue()).map(log -> log.message).collect(Collectors.toList()))
                .orElseGet(List::of);
        if (warnings.isEmpty())
            controller.notificationsDb().removeNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING);
        else
            controller.notificationsDb().setNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING, warnings);

        // Re-acquire the application lock to record the new deployment
        lockApplicationOrThrow(applicationId, application ->
                store(application.with(job.application().instance(),
                                       instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(),
                                                                              warningsFrom(result), quotaUsage))));
        return result;
    }
}
/**
 * Contains the methods for reading, creating, deploying and deleting applications,
 * on behalf of the owning {@link Controller}.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For persistence */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificateManager endpointCertificateManager;
    private final StringFlag dockerImageRepoFlag;
    private final BillingController billingController;

    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          FlagSource flagSource, BillingController billingController) {
        this.controller = controller;
        this.curator = curator;
        this.accessControl = accessControl;
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = clock;
        this.artifactRepository = controller.serviceRegistry().artifactRepository();
        this.applicationStore = controller.serviceRegistry().applicationStore();
        this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        this.billingController = billingController;
        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificateManager = new EndpointCertificateManager(controller.zoneRegistry(), curator,
                                                                    controller.serviceRegistry().endpointCertificateProvider(),
                                                                    controller.serviceRegistry().endpointCertificateValidator(),
                                                                    clock);

        // Deferred rewrite of all stored applications: materialize any instances declared in the
        // deployment spec which are missing from storage, then write everything back.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if (!application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++;
            }
            log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant())));
        });
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }

    /** Returns the instance with the given id, or empty if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }

    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no documents types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly);
    }

    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId));
    }

    /** Enables reindexing for the given application in the given zone. */
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }

    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return curator.readApplications(false);
    }

    /**
     * Returns a snapshot of all readable applications. Unlike {@link #asList()}, this tolerates
     * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
     * snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() {
        return curator.readApplications(true);
    }

    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() {
        return curator.readApplicationIds();
    }

    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return curator.readApplications(tenant);
    }

    public ArtifactRepository artifacts() { return artifactRepository; }

    public ApplicationStore applicationStore() { return applicationStore; }

    /** Returns all currently reachable content clusters among the given deployments. */
    public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
        Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
        for (DeploymentId id : ids)
            if (isHealthy(id)) // skip suspended/unreachable deployments
                clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));

        return Collections.unmodifiableMap(clusters);
    }

    /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
    private Optional<Version> oldestInstalledPlatform(JobStatus job) {
        Version oldest = null;
        for (Run run : job.runs().descendingMap().values()) {
            Version version = run.versions().targetPlatform();
            if (oldest == null || version.isBefore(oldest))
                oldest = version;

            // Walk back only to the most recent successful run; its target is known installed.
            if (run.status() == RunStatus.success)
                return Optional.of(oldest);
        }
        // No successful run on record; fall back to asking the zone's node repository.
        return oldestInstalledPlatform(job.id());
    }

    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  job.application(),
                                                  EnumSet.of(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty())
                           .min(naturalOrder());
    }

    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         .orElse(controller.readSystemVersion());
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Also reject names that collide after dash-to-underscore normalization.
            if (getApplication(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            accessControl.createApplication(id, credentials);

            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }

    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
     */
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }

    /** Fetches the requested application package from the artifact store(s). */
    public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
        return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
    }

    /** Returns given application with a new instance */
    public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
        if (instance.instance().isTester())
            throw new IllegalArgumentException("'" + instance + "' is a tester application!");
        InstanceId.validate(instance.instance().value());

        if (getInstance(instance).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
        // Also reject names that collide after dash-to-underscore normalization.
        if (getInstance(dashToUnderscore(instance)).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");

        log.info("Created " + instance);
        return application.withNewInstance(instance.instance());
    }

    /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
    public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
        applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());

        application = application.with(applicationPackage.deploymentSpec());
        application = application.with(applicationPackage.validationOverrides());

        var existingInstances = application.get().instances().keySet();
        var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
        // Create any declared instances which do not yet exist.
        for (var name : declaredInstances)
            if ( ! existingInstances.contains(name))
                application = withNewInstance(application, application.get().id().instance(name));

        // Remove deployments (and possibly instances) no longer covered by the spec.
        for (InstanceName name : existingInstances) {
            application = withoutDeletedDeployments(application, name);
        }

        // Assign rotations to instances which will deploy to production.
        for (InstanceName instance : declaredInstances)
            if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
                application = controller.routing().assignRotations(application, instance);

        store(application);
        return application;
    }

    /** Deploy a system application to given zone */
    public void deploy(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            deploySystemApplicationPackage(application, zone, version);
        } else {
            // Upgrade by node type when the system application has no package to deploy.
            configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
        }
    }

    /** Deploy a system application to given zone */
    public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            ApplicationPackage applicationPackage = new ApplicationPackage(
                    artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
            );
            return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty());
        } else {
            throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
        }
    }

    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty());
    }

    /**
     * Prepares and activates the given application package on the config server of the given zone,
     * and refreshes routing policies regardless of outcome.
     */
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                                  ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  Optional<TenantRoles> tenantRoles) {
        try {
            // Blank flag values mean "no override" — keep the repo empty in that case.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);

            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant-> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant)tenant).domain());

            // Manual deployments have no submitted package meta; store it here instead.
            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(new DeploymentId(application, zone),
                                                                     clock.instant(),
                                                                     applicationPackage.metaDataZip());

            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                                                                        asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());

            List<TenantSecretStore> tenantSecretStores = controller.tenants()
                    .get(application.tenant())
                    .filter(tenant-> tenant instanceof CloudTenant)
                    .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                    .orElse(List.of());

            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                           endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                           tenantRoles, deploymentQuota, tenantSecretStores));

            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Refresh routing policies even when prepare/activate fails.
            controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone);
        }
    }

    /** Removes production deployments of the instance which the deployment spec no longer covers, if allowed. */
    private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
        DeploymentSpec deploymentSpec = application.get().deploymentSpec();
        List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                      .map(Deployment::zone)
                                                      .filter(zone -> deploymentSpec.instance(instance).isEmpty()
                                                                      || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region()))
                                                      .collect(toList());

        if (deploymentsToRemove.isEmpty()) return application;

        // Removing production deployments requires an explicit validation override.
        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(zone -> zone.region().value())
                                                                  .collect(joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " +
                                               ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));

        // Drop the instance itself when it is undeclared and all its deployments are going away.
        boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
                                 && application.get().require(instance).deployments().size() == deploymentsToRemove.size();
        for (ZoneId zone : deploymentsToRemove)
            application = deactivate(application, instance, zone);
        if (removeInstance)
            application = application.without(instance);
        return application;
    }

    /**
     * Deletes the the given application. All known instances of the applications will be deleted.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
        lockApplicationOrThrow(id, application -> {
            var deployments = application.get().instances().values().stream()
                                         .filter(instance -> ! instance.deployments().isEmpty())
                                         .collect(toMap(instance -> instance.name(),
                                                        instance -> instance.deployments().keySet().stream()
                                                                            .map(ZoneId::toString)
                                                                            .collect(joining(", "))));
            if ( ! deployments.isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);

            for (Instance instance : application.get().instances().values()) {
                controller.routing().removeEndpointsInDns(application.get(), instance.name());
                application = application.without(instance.name());
            }

            applicationStore.removeAll(id.tenant(), id.application());
            applicationStore.removeAllTesters(id.tenant(), id.application());
            applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());

            accessControl.deleteApplication(id, credentials);
            curator.removeApplication(id);

            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
            log.info("Deleted " + id);
        });
    }

    /**
     * Deletes the the given application instance.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the instance does not exist
     */
    public void deleteInstance(ApplicationId instanceId) {
        if (getInstance(instanceId).isEmpty())
            throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");

        lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
            if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                                   application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                              .sorted().collect(joining(", ")));

            // Instances still declared in deployment.xml must be removed there first.
            if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
                 && application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
                throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");

            controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
            curator.writeApplication(application.without(instanceId.instance()).get());
            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
            log.info("Deleted " + instanceId);
        });
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        curator.writeApplication(application.get());
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(requireApplication(applicationId), lock));
        }
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param restartFilter Variables to filter which nodes to restart.
     */
    public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
        configServer.restart(deploymentId, restartFilter);
    }

    /**
     * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
     * If this cannot be ascertained, we must assume it is not.
     */
    public boolean isHealthy(DeploymentId deploymentId) {
        try {
            return ! isSuspended(deploymentId);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
            return false; // conservatively treat unknown status as unhealthy
        }
    }

    /**
     * Asks the config server whether this deployment is currently <i>suspended</i>:
     * Not in a state where it should receive traffic.
     */
    public boolean isSuspended(DeploymentId deploymentId) {
        return configServer.isSuspended(deploymentId);
    }

    /** Sets suspension status of the given deployment in its zone. */
    public void setSuspension(DeploymentId deploymentId, boolean suspend) {
        configServer.setSuspension(deploymentId, suspend);
    }

    /** Deactivate application in the given zone */
    public void deactivate(ApplicationId id, ZoneId zone) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id),
                               application -> store(deactivate(application, id.instance(), zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
        DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
        try {
            configServer.deactivate(id);
        } finally {
            // Routing/meta cleanup runs even if the config server call fails.
            controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
            if (zone.environment().isManuallyDeployed())
                applicationStore.putMetaTombstone(id, clock.instant());
        }
        return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(TenantAndApplicationId application) {
        return curator.lock(application);
    }

    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        return curator.lockForDeployment(application, zone);
    }

    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
     *
     * @param tenantName tenant where application should be deployed
     * @param applicationPackage application package
     * @param deployer principal initiating the deployment, possibly empty
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
        Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                                  .map(domain -> new AthenzDomain(domain.value()));
        if(identityDomain.isEmpty()) {
            return; // no Athenz identity configured — nothing to verify
        }
        if(! (accessControl instanceof AthenzFacade)) {
            throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
        }
        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());

        Optional<AthenzUser> athenzUser = getUser(deployer);
        if (athenzUser.isPresent()) {
            // A user deploys directly: the user must be able to launch the service, or be tenant admin.
            var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
            var serviceToLaunch = instanceName
                    .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> applicationPackage.deploymentSpec().athenzService())
                    .map(service -> new AthenzService(identityDomain.get(), service.value()));

            if(serviceToLaunch.isPresent()) {
                if (
                        ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
                        ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
                ) {
                    throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                       "service " + serviceToLaunch.get().getFullName() + ". " +
                                                       "Please reach out to the domain admin.");
                }
            } else {
                // The application specifies an Athenz domain, so it should also specify a service.
                throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
            }
        } else {
            // Not a user deployment: verify tenant domain and deployment.xml domain match.
            Tenant tenant = controller.tenants().require(tenantName);
            AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
            if ( ! Objects.equals(tenantDomain, identityDomain.get()))
                throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                                   "must match tenant domain: [" + tenantDomain.getName() + "]");
        }
    }

    /** Rejects deployment of versions older than what is already deployed in production, unless pinned or in CD. */
    private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) return;
        if (!zone.environment().isProduction()) return;

        boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned();
        boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 &&
                                  !(revision.isUnknown() && controller.system().isCd());
        if (platformIsOlder || revisionIsOlder)
            throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                             " are older than the currently deployed (platform: %s, application: %s).",
                                                             job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion()));
    }

    /** Returns the id with dashes in the application name replaced by underscores, for collision checks. */
    private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
        return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
    }

    private ApplicationId dashToUnderscore(ApplicationId id) {
        return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
    }

    /** Computes quota usage for the deployment of the given application in the given zone. */
    private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
        var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
        return DeploymentQuotaCalculator.calculateQuotaUsage(application);
    }

    /** Fetches the package for the given revision, falling back to the dev package for unknown revisions. */
    private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
        return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                           : applicationStore.get(application.tenant(), application.application(), revision));
    }

    /*
     * Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
     */
    private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
        return deployer
                .filter(AthenzPrincipal.class::isInstance)
                .map(AthenzPrincipal.class::cast)
                .map(AthenzPrincipal::getIdentity)
                .filter(AthenzUser.class::isInstance)
                .map(AthenzUser.class::cast);
    }

    /*
     * Verifies that the configured athenz service (if any) can be launched.
     */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(domain -> {
            controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
                AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
                deploymentSpec.athenzService().ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
                deploymentSpec.instances().forEach(spec -> {
                    spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                    });
                });
            });
        });
    }

    private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
        if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
            throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
    }

    /** Returns the latest known version within the given major. */
    public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
        return controller.readVersionStatus().versions().stream()
                         .map(VespaVersion::versionNumber)
                         .filter(version -> version.getMajor() == targetMajorVersion)
                         .max(naturalOrder());
    }

    /** Extract deployment warnings metric from deployment result */
    private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
        if (result.prepareResponse().log == null) return Map.of();
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        for (Log log : result.prepareResponse().log) {
            // Only "warn"/"warning" levels count; all warnings are currently aggregated under Warning.all.
            if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
            warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
        }
        return Map.copyOf(warnings);
    }

}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificateManager endpointCertificateManager; private final StringFlag dockerImageRepoFlag; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, FlagSource flagSource, BillingController billingController) { this.controller = controller; this.curator = curator; this.accessControl = accessControl; this.configServer = controller.serviceRegistry().configServer(); this.clock = clock; this.artifactRepository = controller.serviceRegistry().artifactRepository(); this.applicationStore = controller.serviceRegistry().applicationStore(); this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); this.billingController = billingController; deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificateManager = new EndpointCertificateManager( controller.zoneRegistry(), curator, controller.serviceRegistry().endpointCertificateProvider(), controller.serviceRegistry().endpointCertificateValidator(), clock); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName instance : 
application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)); } /** Enables reindexing for the given application in the given zone. */ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. 
*/ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all currently reachable content clusters among the given deployments. 
*/ public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) if (isHealthy(id)) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); } /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), job.application(), EnumSet.of(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. */ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } /** Fetches the requested application package from the artifact store(s). 
*/ public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); } /** Returns given application with a new instance */ public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) { if (instance.instance().isTester()) throw new IllegalArgumentException("'" + instance + "' is a tester application!"); InstanceId.validate(instance.instance().value()); if (getInstance(instance).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists"); if (getInstance(dashToUnderscore(instance)).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists"); log.info("Created " + instance); return application.withNewInstance(instance.instance()); } /** Deploys an application package for an existing application instance. */ /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */ public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! 
existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } for (InstanceName instance : declaredInstances) if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod)) application = controller.routing().assignRotations(application, instance); store(application); return application; } /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty()); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. 
*/ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty()); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, Optional<TenantRoles> tenantRoles) { try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(new DeploymentId(application, zone), clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); List<TenantSecretStore> tenantSecretStores = controller.tenants() .get(application.tenant()) .filter(tenant-> tenant instanceof CloudTenant) .map(tenant -> ((CloudTenant) tenant).tenantSecretStores()) .orElse(List.of()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, endpointCertificateMetadata, dockerImageRepo, domain, tenantRoles, deploymentQuota, tenantSecretStores)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), 
applicationPackage.zippedContent().length); } finally { controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); } } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. " + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! 
instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); accessControl.deleteApplication(id, credentials); curator.removeApplication(id); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(id)); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! 
application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId)); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual. * If this cannot be ascertained, we must assumed it is not. */ public boolean isHealthy(DeploymentId deploymentId) { try { return ! isSuspended(deploymentId); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e)); return false; } } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. */ public boolean isSuspended(DeploymentId deploymentId) { return configServer.isSuspended(deploymentId); } /** Sets suspension status of the given deployment in its zone. 
*/ public void setSuspension(DeploymentId deploymentId, boolean suspend) { configServer.setSuspension(deploymentId, suspend); } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } finally { controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. 
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) { Deployment deployment = instance.deployments().get(zone); if (deployment == null) return; if (!zone.environment().isProduction()) return; boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned(); boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 && !(revision.isUnknown() && controller.system().isCd()); if (platformIsOlder || revisionIsOlder) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion())); } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } 
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) { return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone) : applicationStore.get(application.tenant(), application.application(), revision)); } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. */ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major. 
*/ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { return controller.readVersionStatus().versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
A few more of these log messages should probably have their level raised in the application deployment log, but unfortunately this level also applies to our log
private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if (prepareResponse.log != null) logger.logAll(prepareResponse.log.stream() .map(entry -> new LogEntry(0, Instant.ofEpochMilli(entry.time), LogEntry.typeOf(LogLevel.parse(entry.level)), entry.message)) .collect(toList())); logger.log("Deployment successful."); if (prepareResponse.message != null) logger.log(prepareResponse.message); return Optional.of(running); } catch (ConfigServerException e) { Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(deploymentFailed) : Optional.empty(); switch (e.getErrorCode()) { case CERTIFICATE_NOT_READY: logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")"); return Optional.of(RunStatus.endpointCertificateTimeout); } return result; case ACTIVATION_CONFLICT: case APPLICATION_LOCK_FAILURE: logger.log("Deployment failed with possibly transient error " + e.getErrorCode() + ", will retry: " + e.getMessage()); return result; case LOAD_BALANCER_NOT_READY: case PARENT_HOST_NOT_READY: logger.log(e.getServerMessage()); return result; case OUT_OF_CAPACITY: logger.log(e.getServerMessage()); return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant()) ? 
Optional.empty() : Optional.of(outOfCapacity); case INVALID_APPLICATION_PACKAGE: case BAD_REQUEST: logger.log(WARNING, e.getMessage()); return Optional.of(deploymentFailed); } throw e; } catch (EndpointCertificateException e) { switch (e.type()) { case CERT_NOT_AVAILABLE: logger.log("Waiting for certificate to become valid: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Controller could not validate certificate within " + timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e)); return Optional.of(RunStatus.endpointCertificateTimeout); } return Optional.empty(); default: throw e; } } }
logger.log(WARNING, "Controller could not validate certificate within " +
private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if (prepareResponse.log != null) logger.logAll(prepareResponse.log.stream() .map(entry -> new LogEntry(0, Instant.ofEpochMilli(entry.time), LogEntry.typeOf(LogLevel.parse(entry.level)), entry.message)) .collect(toList())); logger.log("Deployment successful."); if (prepareResponse.message != null) logger.log(prepareResponse.message); return Optional.of(running); } catch (ConfigServerException e) { Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(deploymentFailed) : Optional.empty(); switch (e.getErrorCode()) { case CERTIFICATE_NOT_READY: logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")"); return Optional.of(RunStatus.endpointCertificateTimeout); } return result; case ACTIVATION_CONFLICT: case APPLICATION_LOCK_FAILURE: logger.log("Deployment failed with possibly transient error " + e.getErrorCode() + ", will retry: " + e.getMessage()); return result; case LOAD_BALANCER_NOT_READY: case PARENT_HOST_NOT_READY: logger.log(e.getServerMessage()); return result; case OUT_OF_CAPACITY: logger.log(e.getServerMessage()); return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant()) ? 
Optional.empty() : Optional.of(outOfCapacity); case INVALID_APPLICATION_PACKAGE: case BAD_REQUEST: logger.log(WARNING, e.getMessage()); return Optional.of(deploymentFailed); } throw e; } catch (EndpointCertificateException e) { switch (e.type()) { case CERT_NOT_AVAILABLE: logger.log("Waiting for certificate to become valid: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Controller could not validate certificate within " + timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e)); return Optional.of(RunStatus.endpointCertificateTimeout); } return Optional.empty(); default: throw e; } } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? 
deployInitialReal : deployReal).get() .startTime().get(), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? 
versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), id.application(), Set.of(active)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); boolean firstTick = run.convergenceSummary().isEmpty(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } ConvergenceSummary summary = nodeList.summary(); if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown())); if ( ! 
suspendedTooLong.isEmpty()) { failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? 
controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, testerId, ImmutableSet.of(active, reserved)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; for (var endpoint : endpoints.get(zoneId)) { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if ( ! ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); return false; } } return true; } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone))); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } var policies = controller.routing().policies().get(new DeploymentId(id, zone)); for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } if (endpoint.routingMethod() == RoutingMethod.exclusive) { var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone)); if (policy == null) throw new IllegalStateException(endpoint + " has no matching policy in " + policies); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! 
cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? 
", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? " has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().zoneEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed 
successfully."); controller.jobController().updateTestReport(id); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isPresent()) try { controller.jobController().updateVespaLog(id); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().addNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg); switch (run.status()) { case running: case aborted: return; case success: controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE); return; case outOfCapacity: if (run.id().type().isProduction()) updater.accept("due to lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("due to an invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("as nodes were not able to start the new Java containers"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. 
Please contact the Vespa team if the problem persists!"); } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false; Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest(); boolean useOsgiBasedTestRuntime = testerPlatformVersion(id).isAfter(new Version(7, 247, 11)); byte[] servicesXml = servicesXml(! 
controller.system().isPublic(), useTesterCertificate, useOsgiBasedTestRuntime, testerResourcesFor(zone, spec.requireInstance(id.application().instance())), controller.controllerConfig().steprunner().testerapp()); byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version); byte[] deploymentXml = deploymentXml(id.tester(), spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) appendAndStoreCertificate(zipBuilder, id); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048); X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." 
+ id.number()); X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair, subject, controller.clock().instant(), controller.clock().instant().plus(timeouts.testerCertificate()), SignatureAlgorithm.SHA512_WITH_RSA, BigInteger.valueOf(1)) .build(); controller.jobController().storeTesterCertificate(id, certificate); zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)); zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8)); } private DeploymentId getTesterDeploymentId(RunId runId) { ZoneId zoneId = runId.type().zone(controller.system()); return new DeploymentId(runId.tester().id(), zoneId); } static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) { NodeResources nodeResources = spec.steps().stream() .filter(step -> step.concerns(zone.environment())) .findFirst() .flatMap(step -> step.zones().get(0).testerFlavor()) .map(NodeResources::fromLegacyName) .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES); return nodeResources.with(NodeResources.DiskSpeed.any); } /** Returns the generated services.xml content for the tester application. 
*/ static byte[] servicesXml( boolean systemUsesAthenz, boolean useTesterCertificate, boolean useOsgiBasedTestRuntime, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = String.format(Locale.ENGLISH, "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String handlerAndExtraComponents = useOsgiBasedTestRuntime ? " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" : " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + 
"</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + handlerAndExtraComponents + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... 
messages) { log(List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { controller.jobController().log(id, step, INFO, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? 
deployInitialReal : deployReal).get() .startTime().get(), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? 
versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), id.application(), Set.of(active)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); boolean firstTick = run.convergenceSummary().isEmpty(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } ConvergenceSummary summary = nodeList.summary(); if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown())); if ( ! 
suspendedTooLong.isEmpty()) { failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? 
controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, testerId, ImmutableSet.of(active, reserved)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; for (var endpoint : endpoints.get(zoneId)) { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if ( ! ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); return false; } } return true; } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone))); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } var policies = controller.routing().policies().get(new DeploymentId(id, zone)); for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } if (endpoint.routingMethod() == RoutingMethod.exclusive) { var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone)); if (policy == null) throw new IllegalStateException(endpoint + " has no matching policy in " + policies); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! 
cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? 
", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? " has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().zoneEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed 
successfully."); controller.jobController().updateTestReport(id); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isPresent()) try { controller.jobController().updateVespaLog(id); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg); switch (run.status()) { case running: case aborted: return; case success: controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE); return; case outOfCapacity: if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("nodes were not able to start the new Java containers"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. 
Please contact the Vespa team if the problem persists!"); } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false; Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest(); boolean useOsgiBasedTestRuntime = testerPlatformVersion(id).isAfter(new Version(7, 247, 11)); byte[] servicesXml = servicesXml(! 
controller.system().isPublic(), useTesterCertificate, useOsgiBasedTestRuntime, testerResourcesFor(zone, spec.requireInstance(id.application().instance())), controller.controllerConfig().steprunner().testerapp()); byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version); byte[] deploymentXml = deploymentXml(id.tester(), spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) appendAndStoreCertificate(zipBuilder, id); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048); X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." 
+ id.number()); X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair, subject, controller.clock().instant(), controller.clock().instant().plus(timeouts.testerCertificate()), SignatureAlgorithm.SHA512_WITH_RSA, BigInteger.valueOf(1)) .build(); controller.jobController().storeTesterCertificate(id, certificate); zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)); zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8)); } private DeploymentId getTesterDeploymentId(RunId runId) { ZoneId zoneId = runId.type().zone(controller.system()); return new DeploymentId(runId.tester().id(), zoneId); } static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) { NodeResources nodeResources = spec.steps().stream() .filter(step -> step.concerns(zone.environment())) .findFirst() .flatMap(step -> step.zones().get(0).testerFlavor()) .map(NodeResources::fromLegacyName) .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES); return nodeResources.with(NodeResources.DiskSpeed.any); } /** Returns the generated services.xml content for the tester application. 
*/ static byte[] servicesXml( boolean systemUsesAthenz, boolean useTesterCertificate, boolean useOsgiBasedTestRuntime, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = String.format(Locale.ENGLISH, "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String handlerAndExtraComponents = useOsgiBasedTestRuntime ? " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" : " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + 
"</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + handlerAndExtraComponents + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... 
messages) { log(List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { controller.jobController().log(id, step, INFO, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
TODO: Split the logging into separate deployment and vespa-log streams; keeping them together in one method has proven to be a poor fit.
/**
 * Attempts the given deployment and maps the outcome to a run status.
 *
 * @param deployment supplier which performs the actual deployment when invoked
 * @param startTime  start time of the deployment step, used to decide between retrying and failing
 * @param logger     dual logger which records progress in both the job log and the controller log
 * @return {@code Optional.of(running)} on success, a terminal status on permanent failure,
 *         or {@code Optional.empty()} to indicate the step should be retried
 * @throws ConfigServerException       for error codes not handled below
 * @throws EndpointCertificateException for certificate failures other than CERT_NOT_AVAILABLE
 */
private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        // Forward the config server's prepare log to the job log, preserving timestamp and level.
        if (prepareResponse.log != null)
            logger.logAll(prepareResponse.log.stream()
                                             .map(entry -> new LogEntry(0,
                                                                        Instant.ofEpochMilli(entry.time),
                                                                        LogEntry.typeOf(LogLevel.parse(entry.level)),
                                                                        entry.message))
                                             .collect(toList()));
        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);
        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Possibly transient errors are retried (empty result) until the step is more than
        // one hour old; after that they terminate the run as a deployment failure.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                                     ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.getErrorCode()) {
            case CERTIFICATE_NOT_READY:
                logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired");
                // Give the certificate a bounded grace period before timing the run out.
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")");
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return result;
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
                logger.log("Deployment failed with possibly transient error " + e.getErrorCode() + ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                // The config server's own message explains what resource is still pending.
                logger.log(e.getServerMessage());
                return result;
            case OUT_OF_CAPACITY:
                logger.log(e.getServerMessage());
                // In CD systems, capacity may show up shortly; keep retrying within the capacity timeout.
                return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
                       ? Optional.empty() : Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                // User error: fail immediately, no retry.
                logger.log(WARNING, e.getMessage());
                return Optional.of(deploymentFailed);
        }
        // Unrecognized error codes propagate to the caller.
        throw e;
    }
    catch (EndpointCertificateException e) {
        switch (e.type()) {
            case CERT_NOT_AVAILABLE:
                // Same grace period as above, but for certificate validation done by the controller itself.
                logger.log("Waiting for certificate to become valid: New application, or old one has expired");
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Controller could not validate certificate within " +
                                        timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e));
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return Optional.empty();
            default:
                throw e;
        }
    }
}
logger.log(WARNING, "Controller could not validate certificate within " +
/**
 * Runs the given deployment and translates the result — or any recognized failure —
 * into a run status for this step.
 *
 * @param deployment performs the deployment when its supplier is invoked
 * @param startTime  when this step began; governs retry-versus-fail decisions
 * @param logger     logs to both the job record and the controller log
 * @return the new run status, or empty if the step should simply be retried
 */
private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepare = deployment.get().prepareResponse();
        if (prepare.log != null) {
            // Relay the config server's prepare log into the job log.
            List<LogEntry> entries = prepare.log.stream()
                                                .map(line -> new LogEntry(0,
                                                                          Instant.ofEpochMilli(line.time),
                                                                          LogEntry.typeOf(LogLevel.parse(line.level)),
                                                                          line.message))
                                                .collect(toList());
            logger.logAll(entries);
        }
        logger.log("Deployment successful.");
        if (prepare.message != null)
            logger.log(prepare.message);
        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // For transient errors: retry while the step is under an hour old, otherwise give up.
        Optional<RunStatus> retryOutcome;
        if (startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))))
            retryOutcome = Optional.of(deploymentFailed);
        else
            retryOutcome = Optional.empty();
        switch (e.getErrorCode()) {
            case CERTIFICATE_NOT_READY:
                logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired");
                if ( ! startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant()))
                    return retryOutcome;
                logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")");
                return Optional.of(RunStatus.endpointCertificateTimeout);
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
                logger.log("Deployment failed with possibly transient error " + e.getErrorCode() + ", will retry: " + e.getMessage());
                return retryOutcome;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.getServerMessage());
                return retryOutcome;
            case OUT_OF_CAPACITY:
                logger.log(e.getServerMessage());
                // CD systems wait a while for capacity to appear before failing.
                if (controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant()))
                    return Optional.empty();
                return Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(WARNING, e.getMessage());
                return Optional.of(deploymentFailed);
        }
        throw e;
    }
    catch (EndpointCertificateException e) {
        switch (e.type()) {
            case CERT_NOT_AVAILABLE:
                logger.log("Waiting for certificate to become valid: New application, or old one has expired");
                if ( ! startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant()))
                    return Optional.empty();
                logger.log(WARNING, "Controller could not validate certificate within " + timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e));
                return Optional.of(RunStatus.endpointCertificateTimeout);
            default:
                throw e;
        }
    }
}
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? 
deployInitialReal : deployReal).get() .startTime().get(), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? 
versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), id.application(), Set.of(active)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); boolean firstTick = run.convergenceSummary().isEmpty(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } ConvergenceSummary summary = nodeList.summary(); if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown())); if ( ! 
suspendedTooLong.isEmpty()) { failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? 
controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, testerId, ImmutableSet.of(active, reserved)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; for (var endpoint : endpoints.get(zoneId)) { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if ( ! ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); return false; } } return true; } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone))); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } var policies = controller.routing().policies().get(new DeploymentId(id, zone)); for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } if (endpoint.routingMethod() == RoutingMethod.exclusive) { var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone)); if (policy == null) throw new IllegalStateException(endpoint + " has no matching policy in " + policies); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! 
cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? 
", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? " has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().zoneEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed 
successfully."); controller.jobController().updateTestReport(id); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isPresent()) try { controller.jobController().updateVespaLog(id); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().addNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg); switch (run.status()) { case running: case aborted: return; case success: controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE); return; case outOfCapacity: if (run.id().type().isProduction()) updater.accept("due to lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("due to an invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("as nodes were not able to start the new Java containers"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. 
Please contact the Vespa team if the problem persists!"); } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false; Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest(); boolean useOsgiBasedTestRuntime = testerPlatformVersion(id).isAfter(new Version(7, 247, 11)); byte[] servicesXml = servicesXml(! 
controller.system().isPublic(), useTesterCertificate, useOsgiBasedTestRuntime, testerResourcesFor(zone, spec.requireInstance(id.application().instance())), controller.controllerConfig().steprunner().testerapp()); byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version); byte[] deploymentXml = deploymentXml(id.tester(), spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) appendAndStoreCertificate(zipBuilder, id); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048); X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." 
+ id.number()); X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair, subject, controller.clock().instant(), controller.clock().instant().plus(timeouts.testerCertificate()), SignatureAlgorithm.SHA512_WITH_RSA, BigInteger.valueOf(1)) .build(); controller.jobController().storeTesterCertificate(id, certificate); zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)); zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8)); } private DeploymentId getTesterDeploymentId(RunId runId) { ZoneId zoneId = runId.type().zone(controller.system()); return new DeploymentId(runId.tester().id(), zoneId); } static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) { NodeResources nodeResources = spec.steps().stream() .filter(step -> step.concerns(zone.environment())) .findFirst() .flatMap(step -> step.zones().get(0).testerFlavor()) .map(NodeResources::fromLegacyName) .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES); return nodeResources.with(NodeResources.DiskSpeed.any); } /** Returns the generated services.xml content for the tester application. 
*/ static byte[] servicesXml( boolean systemUsesAthenz, boolean useTesterCertificate, boolean useOsgiBasedTestRuntime, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = String.format(Locale.ENGLISH, "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String handlerAndExtraComponents = useOsgiBasedTestRuntime ? " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" : " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + 
"</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + handlerAndExtraComponents + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... 
messages) { log(List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { controller.jobController().log(id, step, INFO, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? 
deployInitialReal : deployReal).get() .startTime().get(), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? 
versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), id.application(), Set.of(active)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); boolean firstTick = run.convergenceSummary().isEmpty(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } ConvergenceSummary summary = nodeList.summary(); if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown())); if ( ! 
suspendedTooLong.isEmpty()) { failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? 
controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, testerId, ImmutableSet.of(active, reserved)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; for (var endpoint : endpoints.get(zoneId)) { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if ( ! ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); return false; } } return true; } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone))); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } var policies = controller.routing().policies().get(new DeploymentId(id, zone)); for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } if (endpoint.routingMethod() == RoutingMethod.exclusive) { var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone)); if (policy == null) throw new IllegalStateException(endpoint + " has no matching policy in " + policies); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! 
cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? 
", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? " has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().zoneEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed 
successfully."); controller.jobController().updateTestReport(id); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isPresent()) try { controller.jobController().updateVespaLog(id); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg); switch (run.status()) { case running: case aborted: return; case success: controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE); return; case outOfCapacity: if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("nodes were not able to start the new Java containers"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. 
Please contact the Vespa team if the problem persists!"); } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false; Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest(); boolean useOsgiBasedTestRuntime = testerPlatformVersion(id).isAfter(new Version(7, 247, 11)); byte[] servicesXml = servicesXml(! 
controller.system().isPublic(), useTesterCertificate, useOsgiBasedTestRuntime, testerResourcesFor(zone, spec.requireInstance(id.application().instance())), controller.controllerConfig().steprunner().testerapp()); byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version); byte[] deploymentXml = deploymentXml(id.tester(), spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) appendAndStoreCertificate(zipBuilder, id); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048); X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." 
+ id.number()); X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair, subject, controller.clock().instant(), controller.clock().instant().plus(timeouts.testerCertificate()), SignatureAlgorithm.SHA512_WITH_RSA, BigInteger.valueOf(1)) .build(); controller.jobController().storeTesterCertificate(id, certificate); zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)); zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8)); } private DeploymentId getTesterDeploymentId(RunId runId) { ZoneId zoneId = runId.type().zone(controller.system()); return new DeploymentId(runId.tester().id(), zoneId); } static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) { NodeResources nodeResources = spec.steps().stream() .filter(step -> step.concerns(zone.environment())) .findFirst() .flatMap(step -> step.zones().get(0).testerFlavor()) .map(NodeResources::fromLegacyName) .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES); return nodeResources.with(NodeResources.DiskSpeed.any); } /** Returns the generated services.xml content for the tester application. 
*/ static byte[] servicesXml( boolean systemUsesAthenz, boolean useTesterCertificate, boolean useOsgiBasedTestRuntime, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = String.format(Locale.ENGLISH, "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String handlerAndExtraComponents = useOsgiBasedTestRuntime ? " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" : " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + 
"</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + handlerAndExtraComponents + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... 
messages) { log(List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { controller.jobController().log(id, step, INFO, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
`~(˘▾˘)~`
/**
 * Deploys the application package for the given job's instance to the job's zone.
 *
 * Looks up the last run of the job, resolves the platform and application versions to deploy
 * (source versions when {@code deploySourceVersions} is set, target versions otherwise),
 * prepares the application package under the application lock, activates the deployment,
 * records any WARNING-or-worse prepare log messages as a notification, and stores the new
 * deployment on the instance.
 *
 * @param job                  the production/test job whose instance and zone to deploy to; must not be a tester
 * @param deploySourceVersions whether to deploy the run's source versions instead of its target versions
 * @return the result of activating the prepared deployment
 * @throws IllegalArgumentException if the job's instance is a tester instance
 * @throws IllegalStateException if the job has no known run, or its last run has already ended
 */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
    if (job.application().instance().isTester())
        throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");

    TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
    ZoneId zone = job.type().zone(controller.system());
    // Hold the per-zone deployment lock for the whole prepare + activate sequence.
    try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
        Set<ContainerEndpoint> containerEndpoints;
        Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
        Optional<TenantRoles> tenantRoles = Optional.empty();

        Run run = controller.jobController().last(job)
                            .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
        if (run.hasEnded())
            throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

        // Prefer the run's source versions only when explicitly requested; otherwise deploy its targets.
        Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
        ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
        ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);

        // Capture endpoint certificate metadata and container endpoints under the application lock,
        // then release it before the (slow) deploy call below.
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
            Instance instance = application.get().require(job.application().instance());
            rejectOldChange(instance, platform, revision, job, zone);

            if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

            endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));

            containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
        }

        ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints,
                                       endpointCertificateMetadata, tenantRoles);
        var quotaUsage = deploymentQuotaUsage(zone, job.application());

        // Manual deployments are tracked per instance; job deployments per application.
        NotificationSource source = zone.environment().isManuallyDeployed() ? NotificationSource.from(job.application())
                                                                            : NotificationSource.from(applicationId);
        List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                        .map(logs -> logs.stream()
                                                         .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                         .map(log -> log.message)
                                                         .collect(Collectors.toList()))
                                        .orElseGet(List::of);
        // The warnings reflect the latest prepare only, so replace (set) rather than append (add)
        // any previously stored application-package warnings for this source.
        if (warnings.isEmpty())
            controller.notificationsDb().removeNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING);
        else
            controller.notificationsDb().setNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING, warnings);

        lockApplicationOrThrow(applicationId, application ->
                store(application.with(job.application().instance(),
                                       instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(),
                                                                              warningsFrom(result), quotaUsage))));
        return result;
    }
}
/**
 * Deploys the application package for the given job's instance to the job's zone.
 *
 * Resolves the versions to deploy from the job's last run (source versions when
 * {@code deploySourceVersions} is set, target versions otherwise), prepares and activates the
 * deployment, replaces any stored application-package warning notification with the warnings
 * from this prepare, and records the new deployment on the instance.
 *
 * @param job                  the job whose instance and zone to deploy to; must not be a tester
 * @param deploySourceVersions whether to deploy the run's source versions instead of its target versions
 * @return the result of activating the prepared deployment
 * @throws IllegalArgumentException if the job's instance is a tester instance
 * @throws IllegalStateException if the job has no known run, or its last run has already ended
 */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
    if (job.application().instance().isTester())
        throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");

    TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
    ZoneId zone = job.type().zone(controller.system());
    // The per-zone deployment lock is held for the whole prepare + activate sequence.
    try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
        Set<ContainerEndpoint> containerEndpoints;
        Optional<EndpointCertificateMetadata> endpointCertificateMetadata;
        Optional<TenantRoles> tenantRoles = Optional.empty();

        Run run = controller.jobController().last(job)
                            .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
        if (run.hasEnded())
            throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

        // Source versions are used only when explicitly requested; otherwise the run's targets.
        Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
        ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
        ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);

        // Endpoint data is captured under the application lock, then the lock is released
        // before the deploy call below.
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
            Instance instance = application.get().require(job.application().instance());
            rejectOldChange(instance, platform, revision, job, zone);

            if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

            endpointCertificateMetadata = endpointCertificateManager.getEndpointCertificateMetadata(instance, zone, applicationPackage.deploymentSpec().instance(instance.name()));

            containerEndpoints = controller.routing().containerEndpointsOf(application.get(), job.application().instance(), zone);
        }

        ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints,
                                       endpointCertificateMetadata, tenantRoles);
        var quotaUsage = deploymentQuotaUsage(zone, job.application());

        // Manual deployments are tracked per instance; job deployments per application.
        NotificationSource source = zone.environment().isManuallyDeployed() ? NotificationSource.from(job.application())
                                                                            : NotificationSource.from(applicationId);
        // Keep only prepare-log entries at WARNING or above.
        List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                        .map(logs -> logs.stream()
                                                         .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                         .map(log -> log.message)
                                                         .collect(Collectors.toList()))
                                        .orElseGet(List::of);
        // Warnings reflect the latest prepare only: clear the notification when there are none,
        // otherwise overwrite (set) the stored warnings.
        if (warnings.isEmpty())
            controller.notificationsDb().removeNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING);
        else
            controller.notificationsDb().setNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING, warnings);

        lockApplicationOrThrow(applicationId, application ->
                store(application.with(job.application().instance(),
                                       instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(),
                                                                              warningsFrom(result), quotaUsage))));
        return result;
    }
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificateManager endpointCertificateManager; private final StringFlag dockerImageRepoFlag; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, FlagSource flagSource, BillingController billingController) { this.controller = controller; this.curator = curator; this.accessControl = accessControl; this.configServer = controller.serviceRegistry().configServer(); this.clock = clock; this.artifactRepository = controller.serviceRegistry().artifactRepository(); this.applicationStore = controller.serviceRegistry().applicationStore(); this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); this.billingController = billingController; deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificateManager = new EndpointCertificateManager( controller.zoneRegistry(), curator, controller.serviceRegistry().endpointCertificateProvider(), controller.serviceRegistry().endpointCertificateValidator(), clock); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName instance : 
application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)); } /** Enables reindexing for the given application in the given zone. */ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. 
*/ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all currently reachable content clusters among the given deployments. 
*/ public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) if (isHealthy(id)) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); } /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), job.application(), EnumSet.of(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. */ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } /** Fetches the requested application package from the artifact store(s). 
*/ public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); } /** Returns given application with a new instance */ public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) { if (instance.instance().isTester()) throw new IllegalArgumentException("'" + instance + "' is a tester application!"); InstanceId.validate(instance.instance().value()); if (getInstance(instance).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists"); if (getInstance(dashToUnderscore(instance)).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists"); log.info("Created " + instance); return application.withNewInstance(instance.instance()); } /** Deploys an application package for an existing application instance. */ /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */ public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! 
existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } for (InstanceName instance : declaredInstances) if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod)) application = controller.routing().assignRotations(application, instance); store(application); return application; } /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty()); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. 
*/ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty()); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, Optional<TenantRoles> tenantRoles) { try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(new DeploymentId(application, zone), clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); List<TenantSecretStore> tenantSecretStores = controller.tenants() .get(application.tenant()) .filter(tenant-> tenant instanceof CloudTenant) .map(tenant -> ((CloudTenant) tenant).tenantSecretStores()) .orElse(List.of()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, endpointCertificateMetadata, dockerImageRepo, domain, tenantRoles, deploymentQuota, tenantSecretStores)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), 
applicationPackage.zippedContent().length); } finally { controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); } } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. " + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! 
instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); accessControl.deleteApplication(id, credentials); curator.removeApplication(id); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(id)); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! 
application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId)); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/
    public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(requireApplication(applicationId), lock));
        }
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param restartFilter Variables to filter which nodes to restart.
     */
    public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
        configServer.restart(deploymentId, restartFilter);
    }

    /**
     * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
     * If this cannot be ascertained, we must assume it is not.
     */
    public boolean isHealthy(DeploymentId deploymentId) {
        try {
            return ! isSuspended(deploymentId);
        }
        catch (RuntimeException e) {
            // Failure to query counts as unhealthy: safer to assume no traffic is served.
            log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
            return false;
        }
    }

    /**
     * Asks the config server whether this deployment is currently <i>suspended</i>:
     * Not in a state where it should receive traffic.
     */
    public boolean isSuspended(DeploymentId deploymentId) {
        return configServer.isSuspended(deploymentId);
    }

    /** Sets suspension status of the given deployment in its zone. */
    public void setSuspension(DeploymentId deploymentId, boolean suspend) {
        configServer.setSuspension(deploymentId, suspend);
    }

    /** Deactivate application in the given zone */
    public void deactivate(ApplicationId id, ZoneId zone) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id),
                               application -> store(deactivate(application, id.instance(), zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
        DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
        try {
            configServer.deactivate(id);
        }
        finally {
            // Routing policies are refreshed even when deactivation fails, to keep routing state current.
            controller.routing().policies().refresh(application.get().id().instance(instanceName),
                                                    application.get().deploymentSpec(), zone);
            if (zone.environment().isManuallyDeployed())
                applicationStore.putMetaTombstone(id, clock.instant());
        }
        return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(TenantAndApplicationId application) {
        return curator.lock(application);
    }

    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        return curator.lockForDeployment(application, zone);
    }

    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
     *
     * @param tenantName tenant where application should be deployed
     * @param applicationPackage application package
     * @param deployer principal initiating the deployment, possibly empty
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName,
                                                       Optional<ZoneId> zoneId, ApplicationPackage applicationPackage,
                                                       Optional<Principal> deployer) {
        Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                                  .map(domain -> new AthenzDomain(domain.value()));
        if (identityDomain.isEmpty()) {
            return; // No Athenz identity configured: nothing to verify.
        }
        if (! (accessControl instanceof AthenzFacade)) {
            throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
        }
        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
        Optional<AthenzUser> athenzUser = getUser(deployer);
        if (athenzUser.isPresent()) {
            var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
            // Instance-level service wins over the spec-wide default service.
            var serviceToLaunch = instanceName
                    .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> applicationPackage.deploymentSpec().athenzService())
                    .map(service -> new AthenzService(identityDomain.get(), service.value()));
            if (serviceToLaunch.isPresent()) {
                if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get())
                     && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) {
                    throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                      "service " + serviceToLaunch.get().getFullName() + ". " +
                                                      "Please reach out to the domain admin.");
                }
            } else {
                throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
            }
        } else {
            // No user principal: require that the tenant's Athenz domain matches deployment.xml.
            Tenant tenant = controller.tenants().require(tenantName);
            AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
            if ( ! Objects.equals(tenantDomain, identityDomain.get()))
                throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                                  "must match tenant domain: [" + tenantDomain.getName() + "]");
        }
    }

    /** Rejects the deployment if it would downgrade the platform or application version already in this production zone. */
    private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) {
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) return;
        if (!zone.environment().isProduction()) return;
        boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned();
        boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 &&
                                  !(revision.isUnknown() && controller.system().isCd());
        if (platformIsOlder || revisionIsOlder)
            throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                             " are older than the currently deployed (platform: %s, application: %s).",
                                                             job.application(), zone, platform, revision,
                                                             deployment.version(), deployment.applicationVersion()));
    }

    private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
        return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
    }

    private ApplicationId dashToUnderscore(ApplicationId id) {
        return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
    }

    private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
        var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
        return DeploymentQuotaCalculator.calculateQuotaUsage(application);
    }

    /** Fetches the application package for the given revision, from the dev store when the revision is unknown. */
    private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
        return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                           : applicationStore.get(application.tenant(), application.application(), revision));
    }

    /*
     * Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
     */
    private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
        return deployer
                .filter(AthenzPrincipal.class::isInstance)
                .map(AthenzPrincipal.class::cast)
                .map(AthenzPrincipal::getIdentity)
                .filter(AthenzUser.class::isInstance)
                .map(AthenzUser.class::cast);
    }

    /*
     * Verifies that the configured athenz service (if any) can be launched.
     */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(domain -> {
            controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
                AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
                deploymentSpec.athenzService().ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
                deploymentSpec.instances().forEach(spec -> {
                    spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                    });
                });
            });
        });
    }

    private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
        if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
            throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
    }

    /** Returns the latest known version within the given major. */
    public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
        return controller.readVersionStatus().versions().stream()
                         .map(VespaVersion::versionNumber)
                         .filter(version -> version.getMajor() == targetMajorVersion)
                         .max(naturalOrder());
    }

    /** Extract deployment warnings metric from deployment result */
    private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
        if (result.prepareResponse().log == null) return Map.of();
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        for (Log log : result.prepareResponse().log) {
            // Only log entries at warning level are counted.
            if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
            warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
        }
        return Map.copyOf(warnings);
    }
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificateManager endpointCertificateManager; private final StringFlag dockerImageRepoFlag; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, FlagSource flagSource, BillingController billingController) { this.controller = controller; this.curator = curator; this.accessControl = accessControl; this.configServer = controller.serviceRegistry().configServer(); this.clock = clock; this.artifactRepository = controller.serviceRegistry().artifactRepository(); this.applicationStore = controller.serviceRegistry().applicationStore(); this.dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); this.billingController = billingController; deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificateManager = new EndpointCertificateManager( controller.zoneRegistry(), curator, controller.serviceRegistry().endpointCertificateProvider(), controller.serviceRegistry().endpointCertificateValidator(), clock); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName instance : 
application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, String.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)); } /** Enables reindexing for the given application in the given zone. */ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. 
*/ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all currently reachable content clusters among the given deployments. 
*/ public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) if (isHealthy(id)) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); } /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), job.application(), EnumSet.of(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. */ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } /** Fetches the requested application package from the artifact store(s). 
*/ public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); } /** Returns given application with a new instance */ public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) { if (instance.instance().isTester()) throw new IllegalArgumentException("'" + instance + "' is a tester application!"); InstanceId.validate(instance.instance().value()); if (getInstance(instance).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists"); if (getInstance(dashToUnderscore(instance)).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists"); log.info("Created " + instance); return application.withNewInstance(instance.instance()); } /** Deploys an application package for an existing application instance. */ /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */ public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! 
existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } for (InstanceName instance : declaredInstances) if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod)) application = controller.routing().assignRotations(application, instance); store(application); return application; } /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), Optional.empty()); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. 
*/ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), Optional.empty()); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, Optional<TenantRoles> tenantRoles) { try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(new DeploymentId(application, zone), clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); List<TenantSecretStore> tenantSecretStores = controller.tenants() .get(application.tenant()) .filter(tenant-> tenant instanceof CloudTenant) .map(tenant -> ((CloudTenant) tenant).tenantSecretStores()) .orElse(List.of()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, endpointCertificateMetadata, dockerImageRepo, domain, tenantRoles, deploymentQuota, tenantSecretStores)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), 
applicationPackage.zippedContent().length); } finally { controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); } } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. " + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! 
instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); accessControl.deleteApplication(id, credentials); curator.removeApplication(id); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(id)); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! 
application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId)); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual. * If this cannot be ascertained, we must assumed it is not. */ public boolean isHealthy(DeploymentId deploymentId) { try { return ! isSuspended(deploymentId); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e)); return false; } } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. */ public boolean isSuspended(DeploymentId deploymentId) { return configServer.isSuspended(deploymentId); } /** Sets suspension status of the given deployment in its zone. 
*/ public void setSuspension(DeploymentId deploymentId, boolean suspend) { configServer.setSuspension(deploymentId, suspend); } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } finally { controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. 
If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } private void rejectOldChange(Instance instance, Version platform, ApplicationVersion revision, JobId job, ZoneId zone) { Deployment deployment = instance.deployments().get(zone); if (deployment == null) return; if (!zone.environment().isProduction()) return; boolean platformIsOlder = platform.compareTo(deployment.version()) < 0 && !instance.change().isPinned(); boolean revisionIsOlder = revision.compareTo(deployment.applicationVersion()) < 0 && !(revision.isUnknown() && controller.system().isCd()); if (platformIsOlder || revisionIsOlder) throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" + " are older than the currently deployed (platform: %s, application: %s).", job.application(), zone, platform, revision, deployment.version(), deployment.applicationVersion())); } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } 
/** Returns the application package for the given revision, falling back to the dev store for unknown revisions. */
private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
    return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
                                                       : applicationStore.get(application.tenant(), application.application(), revision));
}

/** Returns the AthenzUser from this principal, or Optional.empty if this does not represent a user. */
private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
    return deployer
            .filter(AthenzPrincipal.class::isInstance)
            .map(AthenzPrincipal.class::cast)
            .map(AthenzPrincipal::getIdentity)
            .filter(AthenzUser.class::isInstance)
            .map(AthenzUser.class::cast);
}

/**
 * Verifies that the configured athenz service (if any) can be launched, by checking every
 * reachable zone's config server identity against both the deployment-level service and
 * each instance-level service override. Throws from the helper below on the first violation.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
    deploymentSpec.athenzDomain().ifPresent(domain -> {
        controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
            AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
            deploymentSpec.athenzService().ifPresent(service -> {
                verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
            });
            deploymentSpec.instances().forEach(spec -> {
                spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
            });
        });
    });
}

/** Throws IllegalArgumentException unless the given config server identity may launch the given Athenz service. */
private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
    if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
        throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
}

/** Returns the latest known version within the given major, or empty if none exist on that major. */
public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    return controller.readVersionStatus().versions().stream()
                     .map(VespaVersion::versionNumber)
                     .filter(version -> version.getMajor() == targetMajorVersion)
                     .max(naturalOrder());
}

/**
 * Extracts the deployment-warnings metric from the deployment result: counts log entries
 * whose level is "warn" or "warning" (case-insensitive) under the catch-all Warning.all key.
 * Returns an immutable map; empty when the prepare response carries no log.
 */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    if (result.prepareResponse().log == null) return Map.of();
    Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
    for (Log log : result.prepareResponse().log) {
        if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
        warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
    }
    return Map.copyOf(warnings);
}

}
```suggestion
                if ( ! run.id().type().environment().isTest())
                    updater.accept("due to lack of capacity. Please contact the Vespa team to request more!");
```
Shouldn't dev and perf deployments also trigger this warning, rather than production only? Only the ephemeral test environments seem safe to exclude here.
/**
 * Updates the console deployment-failure notification for the given run:
 * removes it on success, leaves it untouched while the run is still in flight,
 * and otherwise records a message explaining the failure. Run statuses not
 * explicitly handled fall through to a generic "framework error" message.
 */
private void updateConsoleNotification(Run run) {
    NotificationSource source = NotificationSource.from(run.id());
    Consumer<String> updater = msg -> controller.notificationsDb().addNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg);
    switch (run.status()) {
        case running:
        case aborted:
            // Not a terminal outcome — keep whatever notification is already there.
            return;
        case success:
            controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE);
            return;
        case outOfCapacity:
            // Notify for all real deployments (production, dev, perf) — previously this only
            // covered production, silently hiding capacity problems from dev/perf users.
            // Only jobs targeting ephemeral test environments (test, staging) are excluded.
            if ( ! run.id().type().environment().isTest())
                updater.accept("due to lack of capacity. Please contact the Vespa team to request more!");
            return;
        case deploymentFailed:
            updater.accept("due to an invalid application configuration, or timeout of other deployments of the same application");
            return;
        case installationFailed:
            updater.accept("as nodes were not able to start the new Java containers");
            return;
        case testFailure:
            updater.accept("one or more verification tests against the deployment failed");
            return;
        case error:
        case endpointCertificateTimeout:
            break; // fall through to the generic framework-error message below
        default:
            logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'");
    }
    updater.accept("something in the framework went wrong. Such errors are " +
                   "usually transient. Please contact the Vespa team if the problem persists!");
}
if (run.id().type().isProduction()) updater.accept("due to lack of capacity. Please contact the Vespa team to request more!");
/**
 * Updates the console deployment-failure notification for the given run:
 * removes it on success, leaves it untouched while the run is still in flight,
 * and otherwise records a message explaining the failure. Run statuses not
 * explicitly handled fall through to a generic "framework error" message.
 */
private void updateConsoleNotification(Run run) {
    NotificationSource source = NotificationSource.from(run.id());
    // setNotification replaces any existing message for this source, so repeated
    // failures of the same job do not pile up duplicate notifications.
    Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg);
    switch (run.status()) {
        case running:
        case aborted:
            // Not a terminal outcome — keep whatever notification is already there.
            return;
        case success:
            controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE);
            return;
        case outOfCapacity:
            // Notify for all real deployments; only jobs targeting ephemeral test
            // environments (test, staging) are excluded.
            if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!");
            return;
        case deploymentFailed:
            updater.accept("invalid application configuration, or timeout of other deployments of the same application");
            return;
        case installationFailed:
            updater.accept("nodes were not able to start the new Java containers");
            return;
        case testFailure:
            updater.accept("one or more verification tests against the deployment failed");
            return;
        case error:
        case endpointCertificateTimeout:
            break; // fall through to the generic framework-error message below
        default:
            logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'");
    }
    updater.accept("something in the framework went wrong. Such errors are " +
                   "usually transient. Please contact the Vespa team if the problem persists!");
}
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? 
deployInitialReal : deployReal).get() .startTime().get(), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if (prepareResponse.log != null) logger.logAll(prepareResponse.log.stream() .map(entry -> new LogEntry(0, Instant.ofEpochMilli(entry.time), LogEntry.typeOf(LogLevel.parse(entry.level)), entry.message)) .collect(toList())); logger.log("Deployment successful."); if (prepareResponse.message != null) logger.log(prepareResponse.message); return Optional.of(running); } catch (ConfigServerException e) { Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(deploymentFailed) : Optional.empty(); switch (e.getErrorCode()) { case CERTIFICATE_NOT_READY: logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")"); return Optional.of(RunStatus.endpointCertificateTimeout); } return result; case ACTIVATION_CONFLICT: case APPLICATION_LOCK_FAILURE: logger.log("Deployment failed with possibly transient error " + e.getErrorCode() + ", will retry: " + e.getMessage()); return result; case LOAD_BALANCER_NOT_READY: case PARENT_HOST_NOT_READY: logger.log(e.getServerMessage()); return result; case OUT_OF_CAPACITY: logger.log(e.getServerMessage()); return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant()) ? Optional.empty() : Optional.of(outOfCapacity); case INVALID_APPLICATION_PACKAGE: case BAD_REQUEST: logger.log(WARNING, e.getMessage()); return Optional.of(deploymentFailed); } throw e; } catch (EndpointCertificateException e) { switch (e.type()) { case CERT_NOT_AVAILABLE: logger.log("Waiting for certificate to become valid: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Controller could not validate certificate within " + timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e)); return Optional.of(RunStatus.endpointCertificateTimeout); } return Optional.empty(); default: throw e; } } } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, 
DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), id.application(), Set.of(active)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); boolean firstTick = run.convergenceSummary().isEmpty(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } ConvergenceSummary summary = nodeList.summary(); if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); 
return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown())); if ( ! suspendedTooLong.isEmpty()) { failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? 
lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? 
Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, testerId, ImmutableSet.of(active, reserved)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; for (var endpoint : endpoints.get(zoneId)) { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if ( ! ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); return false; } } return true; } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone))); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } var policies = controller.routing().policies().get(new DeploymentId(id, zone)); for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } if (endpoint.routingMethod() == RoutingMethod.exclusive) { var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone)); if (policy == null) throw new IllegalStateException(endpoint + " has no matching policy in " + policies); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! 
loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? 
" has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().zoneEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed 
successfully."); controller.jobController().updateTestReport(id); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isPresent()) try { controller.jobController().updateVespaLog(id); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? 
Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false;

        Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                     .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
                                     .orElse(defaultTimeout);
        // Time out one minute before the effective deadline so Vespa logs can still be collected
        // from zones where deployments are not persistent.
        return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
    }

    /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
    private ApplicationPackage testerPackage(RunId id) {
        ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
        DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
        ZoneId zone = id.type().zone(controller.system());
        boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
        boolean useOsgiBasedTestRuntime = testerPlatformVersion(id).isAfter(new Version(7, 247, 11));

        byte[] servicesXml = servicesXml(! controller.system().isPublic(),
                                         useTesterCertificate,
                                         useOsgiBasedTestRuntime,
                                         testerResourcesFor(zone, spec.requireInstance(id.application().instance())),
                                         controller.controllerConfig().steprunner().testerapp());
        byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(),
                                                                                    id.application().application(),
                                                                                    version);
        byte[] deploymentXml = deploymentXml(id.tester(),
                                             spec.athenzDomain(),
                                             spec.requireInstance(id.application().instance())
                                                 .athenzService(zone.environment(), zone.region()));

        try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
            zipBuilder.add(testPackage);
            zipBuilder.add("services.xml", servicesXml);
            zipBuilder.add("deployment.xml", deploymentXml);
            if (useTesterCertificate)
                appendAndStoreCertificate(zipBuilder, id);
            zipBuilder.close();
            return new ApplicationPackage(zipBuilder.toByteArray());
        }
    }

    /** Generates a self-signed certificate for the tester, stores it on the run, and adds key and cert to the package. */
    private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
        KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
        X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
        X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                         subject,
                                                                         controller.clock().instant(),
                                                                         controller.clock().instant().plus(timeouts.testerCertificate()),
                                                                         SignatureAlgorithm.SHA512_WITH_RSA,
                                                                         BigInteger.valueOf(1))
                                                            .build();
        controller.jobController().storeTesterCertificate(id, certificate);
        zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
        zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
    }

    /** Returns the deployment id of the tester application for the given run. */
    private DeploymentId getTesterDeploymentId(RunId runId) {
        ZoneId zoneId = runId.type().zone(controller.system());
        return new DeploymentId(runId.tester().id(), zoneId);
    }

    /** Returns the tester resources declared for the first step concerning this zone's environment, or zone-dependent defaults. */
    static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
        NodeResources nodeResources = spec.steps().stream()
                                          .filter(step -> step.concerns(zone.environment()))
                                          .findFirst()
                                          .flatMap(step -> step.zones().get(0).testerFlavor())
                                          .map(NodeResources::fromLegacyName)
                                          .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
                                                                                         : DEFAULT_TESTER_RESOURCES);
        return nodeResources.with(NodeResources.DiskSpeed.any);
    }

    /** Returns the generated services.xml content for the tester application. */
    static byte[] servicesXml(boolean systemUsesAthenz,
                              boolean useTesterCertificate,
                              boolean useOsgiBasedTestRuntime,
                              NodeResources resources,
                              ControllerConfig.Steprunner.Testerapp config) {
        int jdiscMemoryGb = 2; // 2Gb memory for tester application (excessive?).
        int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb());

        // Of the remaining memory, split 50/50 between Surefire running the tests and the rest.
        int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);

        String resourceString = String.format(Locale.ENGLISH,
                                              "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
                                              resources.vcpu(), resources.memoryGb(), resources.diskGb(),
                                              resources.diskSpeed().name(), resources.storageType().name());

        String runtimeProviderClass = config.runtimeProviderClass();
        String tenantCdBundle = config.tenantCdBundle();

        // NOTE(review): the <binding> literals below were truncated in this copy of the source
        // (everything after "http:" was eaten by a comment stripper, leaving unterminated strings);
        // restored to the upstream binding "http://*/tester/v1/*" — verify against the canonical source.
        String handlerAndExtraComponents = useOsgiBasedTestRuntime
                ?
                " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" +
                "\n" +
                " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" +
                " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" +
                " <artifactsPath>artifacts</artifactsPath>\n" +
                " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
                " </config>\n" +
                " </component>\n" +
                "\n" +
                " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" +
                " <binding>http://*/tester/v1/*</binding>\n" +
                " </handler>\n"
                :
                " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
                " <binding>http://*/tester/v1/*</binding>\n" +
                " </handler>\n";

        String servicesXml =
                "<?xml version='1.0' encoding='UTF-8'?>\n" +
                "<services xmlns:deploy='vespa' version='1.0'>\n" +
                " <container version='1.0' id='tester'>\n" +
                "\n" +
                " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
                " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
                " <artifactsPath>artifacts</artifactsPath>\n" +
                " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
                " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" +
                " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" +
                " </config>\n" +
                " </component>\n" +
                "\n" +
                handlerAndExtraComponents +
                "\n" +
                " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" +
                " " + resourceString + "\n" +
                " </nodes>\n" +
                " </container>\n" +
                "</services>\n";

        return servicesXml.getBytes(UTF_8);
    }

    /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
    private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
        String deploymentSpec =
                "<?xml version='1.0' encoding='UTF-8'?>\n" +
                "<deployment version=\"1.0\" " +
                athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
                athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
                " <instance id=\"" + id.id().instance().value() + "\" />" +
                "</deployment>";
        return deploymentSpec.getBytes(UTF_8);
    }

    /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
    private class DualLogger {

        private final RunId id;
        private final Step step;

        private DualLogger(RunId id, Step step) {
            this.id = id;
            this.step = step;
        }

        private void log(String... messages) {
            log(List.of(messages));
        }

        private void logAll(List<LogEntry> messages) {
            controller.jobController().log(id, step, messages);
        }

        private void log(List<String> messages) {
            controller.jobController().log(id, step, INFO, messages);
        }

        private void log(Level level, String message) {
            log(level, message, null);
        }

        // Print stack trace in our logs, but don't expose it to end users
        private void logWithInternalException(Level level, String message, Throwable thrown) {
            logger.log(level, id + " at " + step + ": " + message, thrown);
            controller.jobController().log(id, step, level, message);
        }

        private void log(Level level, String message, Throwable thrown) {
            logger.log(level, id + " at " + step + ": " + message, thrown);
            if (thrown != null) {
                ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
                thrown.printStackTrace(new PrintStream(traceBuffer));
                message += "\n" + traceBuffer;
            }
            controller.jobController().log(id, step, level, message);
        }

    }

    /** Timeouts for the steps of a run, dependent on the system this runs in. */
    static class Timeouts {

        private final SystemName system;

        private Timeouts(SystemName system) {
            this.system = requireNonNull(system);
        }

        public static Timeouts of(SystemName system) {
            return new Timeouts(system);
        }

        Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); }
        Duration endpoint() { return Duration.ofMinutes(15); }
        Duration endpointCertificate() { return Duration.ofMinutes(20); }
        Duration tester() { return Duration.ofMinutes(30); }
        Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); }
        Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); }
        Duration testerCertificate() { return Duration.ofMinutes(300); }

    }

}
/**
 * Runs the individual steps of a deployment job against the hosted Vespa controller.
 * Dispatches each {@link Step} to its handler and maps outcomes to {@link RunStatus} values.
 */
class InternalStepRunner implements StepRunner {

    private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());

    // Default resources for the tester container; the AWS variant is larger.
    static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
    static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);

    private final Controller controller;
    private final TestConfigSerializer testConfigSerializer;
    private final DeploymentFailureMails mails;
    private final Timeouts timeouts;

    public InternalStepRunner(Controller controller) {
        this.controller = controller;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
        this.mails = new DeploymentFailureMails(controller.zoneRegistry());
        this.timeouts = Timeouts.of(controller.system());
    }

    /**
     * Runs the given locked step of the given run, returning the new run status if it concluded,
     * or empty if it should be retried later.
     */
    @Override
    public Optional<RunStatus> run(LockedStep step, RunId id) {
        // Logs both to the job's log (visible to users) and to the controller's own logger.
        DualLogger logger = new DualLogger(id, step.get());
        try {
            switch (step.get()) {
                case deployTester: return deployTester(id, logger);
                case deployInitialReal: return deployInitialReal(id, logger);
                case installInitialReal: return installInitialReal(id, logger);
                case deployReal: return deployReal(id, logger);
                case installTester: return installTester(id, logger);
                case installReal: return installReal(id, logger);
                case startStagingSetup: return startTests(id, true, logger);
                case endStagingSetup:
                case endTests: return endTests(id, logger);
                case startTests: return startTests(id, false, logger);
                case copyVespaLogs: return copyVespaLogs(id, logger);
                case deactivateReal: return deactivateReal(id, logger);
                case deactivateTester: return deactivateTester(id, logger);
                case report: return report(id, logger);
                default: throw new AssertionError("Unknown step '" + step + "'!");
            }
        }
        catch (UncheckedIOException e) {
            // IO problems are assumed transient: log and retry on a later tick.
            logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
            return Optional.empty();
        }
        catch (RuntimeException e) {
            logger.log(WARNING, "Unexpected exception running " + id, e);
            if (step.get().alwaysRun()) {
                // Cleanup steps must eventually complete, so keep retrying them.
                logger.log("Will keep trying, as this is a cleanup step.");
                return Optional.empty();
            }
            return Optional.of(error);
        }
    }

    /** Deploys the real application, on the source platform/application versions, as the first (staging setup) deployment. */
    private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
        Versions versions = controller.jobController().run(id).get().versions();
        logger.log("Deploying platform version " +
                   versions.sourcePlatform().orElse(versions.targetPlatform()) +
                   " and application version " +
                   versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
        return deployReal(id, true, logger);
    }

    /** Deploys the real application on the target platform/application versions. */
    private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
        Versions versions = controller.jobController().run(id).get().versions();
        logger.log("Deploying platform version " + versions.targetPlatform() +
                   " and application version " + versions.targetApplication().id() + " ...");
        return deployReal(id, false, logger);
    }

    // Common deployment path for both the initial (staging setup) and the target deployment.
    private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
        return deploy(() -> controller.applications().deploy(id.job(), setTheStage),
                      controller.jobController().run(id).get()
                                .stepInfo(setTheStage ?
deployInitialReal : deployReal).get() .startTime().get(), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if (prepareResponse.log != null) logger.logAll(prepareResponse.log.stream() .map(entry -> new LogEntry(0, Instant.ofEpochMilli(entry.time), LogEntry.typeOf(LogLevel.parse(entry.level)), entry.message)) .collect(toList())); logger.log("Deployment successful."); if (prepareResponse.message != null) logger.log(prepareResponse.message); return Optional.of(running); } catch (ConfigServerException e) { Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(deploymentFailed) : Optional.empty(); switch (e.getErrorCode()) { case CERTIFICATE_NOT_READY: logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")"); return Optional.of(RunStatus.endpointCertificateTimeout); } return result; case ACTIVATION_CONFLICT: case APPLICATION_LOCK_FAILURE: logger.log("Deployment failed with possibly transient error " + e.getErrorCode() + ", will retry: " + e.getMessage()); return result; case LOAD_BALANCER_NOT_READY: case PARENT_HOST_NOT_READY: logger.log(e.getServerMessage()); return result; case OUT_OF_CAPACITY: logger.log(e.getServerMessage()); return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant()) ? Optional.empty() : Optional.of(outOfCapacity); case INVALID_APPLICATION_PACKAGE: case BAD_REQUEST: logger.log(WARNING, e.getMessage()); return Optional.of(deploymentFailed); } throw e; } catch (EndpointCertificateException e) { switch (e.type()) { case CERT_NOT_AVAILABLE: logger.log("Waiting for certificate to become valid: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Controller could not validate certificate within " + timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e)); return Optional.of(RunStatus.endpointCertificateTimeout); } return Optional.empty(); default: throw e; } } } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, 
DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), id.application(), Set.of(active)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); boolean firstTick = run.convergenceSummary().isEmpty(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } ConvergenceSummary summary = nodeList.summary(); if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); 
return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown())); if ( ! suspendedTooLong.isEmpty()) { failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? 
lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? 
Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, testerId, ImmutableSet.of(active, reserved)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; for (var endpoint : endpoints.get(zoneId)) { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if ( ! ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); return false; } } return true; } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone))); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } var policies = controller.routing().policies().get(new DeploymentId(id, zone)); for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } if (endpoint.routingMethod() == RoutingMethod.exclusive) { var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone)); if (policy == null) throw new IllegalStateException(endpoint + " has no matching policy in " + policies); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! 
loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? 
" has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().zoneEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) {
            // Deployment was present a moment ago but its endpoints are gone: treat as fatal.
            logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
            return Optional.of(error);
        }
        logEndpoints(endpoints, logger);

        if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
            logger.log(WARNING, "Tester container went bad!");
            return Optional.of(error);
        }

        logger.log("Starting tests ...");
        TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
        byte[] config = testConfigSerializer.configJson(id.application(),
                                                        id.type(),
                                                        true,
                                                        endpoints,
                                                        controller.applications().reachableContentClustersByZone(deployments));
        controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
        return Optional.of(running);
    }

    /** Polls the tester for test status, copies its log, and concludes the run when tests have finished. */
    private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
        if (deployment(id.application(), id.type()).isEmpty()) {
            logger.log(INFO, "Deployment expired before tests could complete.");
            return Optional.of(aborted);
        }

        // A tester certificate that expired mid-run makes further communication with the tester impossible.
        Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
        if (testerCertificate.isPresent()) {
            try {
                testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
            }
            catch (CertificateExpiredException | CertificateNotYetValidException e) {
                logger.log(WARNING, "Tester certificate expired before tests could complete.");
                return Optional.of(aborted);
            }
        }

        controller.jobController().updateTestLog(id);

        TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
        switch (testStatus) {
            case NOT_STARTED:
                throw new IllegalStateException("Tester reports tests not started, even though they should have!");
            case RUNNING:
                return Optional.empty(); // Not done yet; check again on a later tick.
            case FAILURE:
                logger.log("Tests failed.");
                controller.jobController().updateTestReport(id);
                return Optional.of(testFailure);
            case ERROR:
                logger.log(INFO, "Tester failed running its tests!");
                return Optional.of(error);
            case SUCCESS:
                logger.log("Tests completed successfully.");
                controller.jobController().updateTestReport(id);
                return Optional.of(running);
            default:
                throw new IllegalStateException("Unknown status '" + testStatus + "'!");
        }
    }

    /** Copies Vespa logs from the still-present deployment into the job log; failure here fails the step. */
    private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
        if (deployment(id.application(), id.type()).isPresent())
            try {
                controller.jobController().updateVespaLog(id);
            }
            catch (Exception e) {
                logger.log(INFO, "Failure getting vespa logs for " + id, e);
                return Optional.of(error);
            }
        return Optional.of(running);
    }

    /** Deactivates the real application's deployment; retries failures for up to an hour before giving up. */
    private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
        try {
            logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
            controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
            return Optional.of(running);
        }
        catch (RuntimeException e) {
            logger.log(WARNING, "Failed deleting application " + id.application(), e);
            Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
            return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error)
                                                                                               : Optional.empty();
        }
    }

    /** Deactivates the tester's deployment; retries failures for up to an hour before giving up. */
    private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
        try {
            logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
            controller.jobController().deactivateTester(id.tester(), id.type());
            return Optional.of(running);
        }
        catch (RuntimeException e) {
            logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
            Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
            return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ?
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? 
Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false;

        Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                     .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
                                     .orElse(defaultTimeout);
        // Use the lower of the zone TTL and the default timeout, minus one minute, as the threshold,
        // so Vespa logs can still be collected before the deployment expires.
        return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
    }

    /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
    private ApplicationPackage testerPackage(RunId id) {
        ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
        DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
        ZoneId zone = id.type().zone(controller.system());
        // Tester certificates are only used for test environments in public systems.
        boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
        // Platform versions after 7.247.11 ship the OSGi-based test runtime.
        boolean useOsgiBasedTestRuntime = testerPlatformVersion(id).isAfter(new Version(7, 247, 11));

        byte[] servicesXml = servicesXml(! controller.system().isPublic(),
                                         useTesterCertificate,
                                         useOsgiBasedTestRuntime,
                                         testerResourcesFor(zone, spec.requireInstance(id.application().instance())),
                                         controller.controllerConfig().steprunner().testerapp());
        byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(),
                                                                                    id.application().application(),
                                                                                    version);
        byte[] deploymentXml = deploymentXml(id.tester(),
                                             spec.athenzDomain(),
                                             spec.requireInstance(id.application().instance())
                                                 .athenzService(zone.environment(), zone.region()));

        try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
            zipBuilder.add(testPackage);
            zipBuilder.add("services.xml", servicesXml);
            zipBuilder.add("deployment.xml", deploymentXml);
            if (useTesterCertificate)
                appendAndStoreCertificate(zipBuilder, id);
            zipBuilder.close();
            return new ApplicationPackage(zipBuilder.toByteArray());
        }
    }

    /** Generates a self-signed certificate for the tester, stores it on the run, and adds key and cert to the package. */
    private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
        KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
        X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." + id.number());
        X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                         subject,
                                                                         controller.clock().instant(),
                                                                         controller.clock().instant().plus(timeouts.testerCertificate()),
                                                                         SignatureAlgorithm.SHA512_WITH_RSA,
                                                                         BigInteger.valueOf(1))
                                                            .build();
        controller.jobController().storeTesterCertificate(id, certificate);
        zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
        zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
    }

    /** Returns the deployment id of the tester application for the given run. */
    private DeploymentId getTesterDeploymentId(RunId runId) {
        ZoneId zoneId = runId.type().zone(controller.system());
        return new DeploymentId(runId.tester().id(), zoneId);
    }

    /** Returns the tester resources declared for the first step concerning this zone's environment, or zone-dependent defaults. */
    static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
        NodeResources nodeResources = spec.steps().stream()
                                          .filter(step -> step.concerns(zone.environment()))
                                          .findFirst()
                                          .flatMap(step -> step.zones().get(0).testerFlavor())
                                          .map(NodeResources::fromLegacyName)
                                          .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
                                                                                         : DEFAULT_TESTER_RESOURCES);
        return nodeResources.with(NodeResources.DiskSpeed.any);
    }

    /** Returns the generated services.xml content for the tester application.
*/ static byte[] servicesXml( boolean systemUsesAthenz, boolean useTesterCertificate, boolean useOsgiBasedTestRuntime, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = String.format(Locale.ENGLISH, "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String handlerAndExtraComponents = useOsgiBasedTestRuntime ? " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" : " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + 
"</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + handlerAndExtraComponents + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... 
messages) { log(List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { controller.jobController().log(id, step, INFO, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
This sentence doesn't follow the same pattern as the others.
private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().addNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg); switch (run.status()) { case running: case aborted: return; case success: controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE); return; case outOfCapacity: if (run.id().type().isProduction()) updater.accept("due to lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("due to an invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("as nodes were not able to start the new Java containers"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. Please contact the Vespa team if the problem persists!"); }
updater.accept("one or more verification tests against the deployment failed");
private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg); switch (run.status()) { case running: case aborted: return; case success: controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE); return; case outOfCapacity: if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("nodes were not able to start the new Java containers"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. Please contact the Vespa team if the problem persists!"); }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? 
deployInitialReal : deployReal).get() .startTime().get(), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if (prepareResponse.log != null) logger.logAll(prepareResponse.log.stream() .map(entry -> new LogEntry(0, Instant.ofEpochMilli(entry.time), LogEntry.typeOf(LogLevel.parse(entry.level)), entry.message)) .collect(toList())); logger.log("Deployment successful."); if (prepareResponse.message != null) logger.log(prepareResponse.message); return Optional.of(running); } catch (ConfigServerException e) { Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(deploymentFailed) : Optional.empty(); switch (e.getErrorCode()) { case CERTIFICATE_NOT_READY: logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")"); return Optional.of(RunStatus.endpointCertificateTimeout); } return result; case ACTIVATION_CONFLICT: case APPLICATION_LOCK_FAILURE: logger.log("Deployment failed with possibly transient error " + e.getErrorCode() + ", will retry: " + e.getMessage()); return result; case LOAD_BALANCER_NOT_READY: case PARENT_HOST_NOT_READY: logger.log(e.getServerMessage()); return result; case OUT_OF_CAPACITY: logger.log(e.getServerMessage()); return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant()) ? Optional.empty() : Optional.of(outOfCapacity); case INVALID_APPLICATION_PACKAGE: case BAD_REQUEST: logger.log(WARNING, e.getMessage()); return Optional.of(deploymentFailed); } throw e; } catch (EndpointCertificateException e) { switch (e.type()) { case CERT_NOT_AVAILABLE: logger.log("Waiting for certificate to become valid: New application, or old one has expired"); if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Controller could not validate certificate within " + timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e)); return Optional.of(RunStatus.endpointCertificateTimeout); } return Optional.empty(); default: throw e; } } } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, 
DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), id.application(), Set.of(active)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); boolean firstTick = run.convergenceSummary().isEmpty(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } ConvergenceSummary summary = nodeList.summary(); if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); 
return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown())); if ( ! suspendedTooLong.isEmpty()) { failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? 
lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? 
Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, testerId, ImmutableSet.of(active, reserved)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; for (var endpoint : endpoints.get(zoneId)) { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if ( ! ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); return false; } } return true; } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone))); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } var policies = controller.routing().policies().get(new DeploymentId(id, zone)); for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } if (endpoint.routingMethod() == RoutingMethod.exclusive) { var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone)); if (policy == null) throw new IllegalStateException(endpoint + " has no matching policy in " + policies); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! 
loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? 
" has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().zoneEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed 
successfully."); controller.jobController().updateTestReport(id); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isPresent()) try { controller.jobController().updateVespaLog(id); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? 
// NOTE(review): tail of the ternary started on the previous line.
                       Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty();
            case deploymentFailed:
                return Optional.of(mails.deploymentFailure(run.id(), recipients));
            case installationFailed:
                return Optional.of(mails.installationFailure(run.id(), recipients));
            case testFailure:
                return Optional.of(mails.testFailure(run.id(), recipients));
            case error:
            case endpointCertificateTimeout:
                return Optional.of(mails.systemError(run.id(), recipients));
            default:
                // Unknown status: fall back to the system-error mail, but log it.
                logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'");
                return Optional.of(mails.systemError(run.id(), recipients));
        }
    }

    /** Returns the deployment of the real application in the zone of the given job, if it exists. */
    private Optional<Deployment> deployment(ApplicationId id, JobType type) {
        return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
    }

    /** Returns the real application with the given id. */
    private Instance application(ApplicationId id) {
        // NOTE(review): lock is taken with a no-op closure — presumably to serialize with concurrent writers; confirm.
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { });
        return controller.applications().requireInstance(id);
    }

    /**
     * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout.
     *
     * We time out the job before the deployment expires, for zones where deployments are not persistent,
     * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
     * and the given default installation timeout, minus one minute, is used as a timeout threshold.
     */
    private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
        Run run = controller.jobController().run(id).get();
        // Don't time out runs that started prior to the deployment itself, outside CD.
        if ( ! controller.system().isCd() && run.start().isAfter(deployment.at()))
            return false;
        Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
                                     .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
                                     .orElse(defaultTimeout);
        return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1))));
    }

    /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
    private ApplicationPackage testerPackage(RunId id) {
        ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
        DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
        ZoneId zone = id.type().zone(controller.system());
        boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest();
        // The OSGi-based test runtime requires a sufficiently new platform version.
        boolean useOsgiBasedTestRuntime = testerPlatformVersion(id).isAfter(new Version(7, 247, 11));
        byte[] servicesXml = servicesXml(!
// NOTE(review): continuation of the servicesXml(...) call started on the previous line.
                                        controller.system().isPublic(),
                                        useTesterCertificate,
                                        useOsgiBasedTestRuntime,
                                        testerResourcesFor(zone, spec.requireInstance(id.application().instance())),
                                        controller.controllerConfig().steprunner().testerapp());
        byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version);
        byte[] deploymentXml = deploymentXml(id.tester(),
                                             spec.athenzDomain(),
                                             spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region()));
        // Assemble the tester package: the fat-jar test package plus generated services.xml and deployment.xml.
        try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
            zipBuilder.add(testPackage);
            zipBuilder.add("services.xml", servicesXml);
            zipBuilder.add("deployment.xml", deploymentXml);
            if (useTesterCertificate)
                appendAndStoreCertificate(zipBuilder, id);
            zipBuilder.close();
            return new ApplicationPackage(zipBuilder.toByteArray());
        }
    }

    /** Generates a self-signed certificate for the tester, stores it for the run, and adds key and cert to the package. */
    private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) {
        KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048);
        X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "."
                                                  + id.number());
        X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair,
                                                                         subject,
                                                                         controller.clock().instant(),
                                                                         controller.clock().instant().plus(timeouts.testerCertificate()),
                                                                         SignatureAlgorithm.SHA512_WITH_RSA,
                                                                         BigInteger.valueOf(1))
                                                            .build();
        controller.jobController().storeTesterCertificate(id, certificate);
        zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8));
        zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8));
    }

    /** Returns the deployment id of the tester application for the given run. */
    private DeploymentId getTesterDeploymentId(RunId runId) {
        ZoneId zoneId = runId.type().zone(controller.system());
        return new DeploymentId(runId.tester().id(), zoneId);
    }

    /** Returns the resources for the tester node: any declared tester flavor for the zone, else a region-dependent default. */
    static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
        NodeResources nodeResources = spec.steps().stream()
                                          .filter(step -> step.concerns(zone.environment()))
                                          .findFirst()
                                          .flatMap(step -> step.zones().get(0).testerFlavor())
                                          .map(NodeResources::fromLegacyName)
                                          .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS
                                                                                         : DEFAULT_TESTER_RESOURCES);
        // Disk speed is irrelevant for the tester.
        return nodeResources.with(NodeResources.DiskSpeed.any);
    }

    /** Returns the generated services.xml content for the tester application.
*/ static byte[] servicesXml( boolean systemUsesAthenz, boolean useTesterCertificate, boolean useOsgiBasedTestRuntime, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = String.format(Locale.ENGLISH, "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String handlerAndExtraComponents = useOsgiBasedTestRuntime ? " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" : " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + 
"</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + handlerAndExtraComponents + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... 
messages) {
            log(List.of(messages));
        }

        /** Forwards pre-built log entries to the job controller only. */
        private void logAll(List<LogEntry> messages) {
            controller.jobController().log(id, step, messages);
        }

        private void log(List<String> messages) {
            controller.jobController().log(id, step, INFO, messages);
        }

        private void log(Level level, String message) {
            log(level, message, null);
        }

        /** Logs the throwable to the internal logger only; the job log receives just the message. */
        private void logWithInternalException(Level level, String message, Throwable thrown) {
            logger.log(level, id + " at " + step + ": " + message, thrown);
            controller.jobController().log(id, step, level, message);
        }

        /** Logs to both sinks; the stack trace, if any, is appended to the message sent to the job log. */
        private void log(Level level, String message, Throwable thrown) {
            logger.log(level, id + " at " + step + ": " + message, thrown);
            if (thrown != null) {
                ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
                thrown.printStackTrace(new PrintStream(traceBuffer));
                message += "\n" + traceBuffer;
            }
            controller.jobController().log(id, step, level, message);
        }

    }

    /** Step timeouts, some of which differ between CD and other systems. */
    static class Timeouts {

        private final SystemName system;

        private Timeouts(SystemName system) {
            this.system = requireNonNull(system);
        }

        public static Timeouts of(SystemName system) {
            return new Timeouts(system);
        }

        Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); }
        Duration endpoint() { return Duration.ofMinutes(15); }
        Duration endpointCertificate() { return Duration.ofMinutes(20); }
        Duration tester() { return Duration.ofMinutes(30); }
        Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); }
        Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); }
        Duration testerCertificate() { return Duration.ofMinutes(300); }

    }

}
class InternalStepRunner implements StepRunner {

    private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());

    // Default tester node resources; AWS regions get a larger default.
    static final NodeResources DEFAULT_TESTER_RESOURCES =
            new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any);
    static final NodeResources DEFAULT_TESTER_RESOURCES_AWS =
            new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any);

    private final Controller controller;
    private final TestConfigSerializer testConfigSerializer;
    private final DeploymentFailureMails mails;
    private final Timeouts timeouts;

    public InternalStepRunner(Controller controller) {
        this.controller = controller;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
        this.mails = new DeploymentFailureMails(controller.zoneRegistry());
        this.timeouts = Timeouts.of(controller.system());
    }

    /** Dispatches to the handler for the given step; an empty result means "not done yet, retry". */
    @Override
    public Optional<RunStatus> run(LockedStep step, RunId id) {
        DualLogger logger = new DualLogger(id, step.get());
        try {
            switch (step.get()) {
                case deployTester: return deployTester(id, logger);
                case deployInitialReal: return deployInitialReal(id, logger);
                case installInitialReal: return installInitialReal(id, logger);
                case deployReal: return deployReal(id, logger);
                case installTester: return installTester(id, logger);
                case installReal: return installReal(id, logger);
                case startStagingSetup: return startTests(id, true, logger);
                case endStagingSetup:
                case endTests: return endTests(id, logger);
                case startTests: return startTests(id, false, logger);
                case copyVespaLogs: return copyVespaLogs(id, logger);
                case deactivateReal: return deactivateReal(id, logger);
                case deactivateTester: return deactivateTester(id, logger);
                case report: return report(id, logger);
                default: throw new AssertionError("Unknown step '" + step + "'!");
            }
        }
        catch (UncheckedIOException e) {
            // I/O trouble is treated as transient: log internally and let the step be retried.
            logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
            return Optional.empty();
        }
        catch (RuntimeException e) {
            logger.log(WARNING, "Unexpected exception running " + id, e);
            if (step.get().alwaysRun()) {
                logger.log("Will keep trying, as this is a cleanup step.");
                return Optional.empty();
            }
            return Optional.of(error);
        }
    }

    /** Deploys the source platform and application versions (falling back to the targets when absent). */
    private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
        Versions versions = controller.jobController().run(id).get().versions();
        logger.log("Deploying platform version " +
                   versions.sourcePlatform().orElse(versions.targetPlatform()) +
                   " and application version " +
                   versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
        return deployReal(id, true, logger);
    }

    /** Deploys the target platform and application versions. */
    private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
        Versions versions = controller.jobController().run(id).get().versions();
        logger.log("Deploying platform version " + versions.targetPlatform() +
                   " and application version " + versions.targetApplication().id() + " ...");
        return deployReal(id, false, logger);
    }

    private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
        return deploy(() -> controller.applications().deploy(id.job(), setTheStage),
                      controller.jobController().run(id).get()
                                .stepInfo(setTheStage ?
// NOTE(review): tail of the ternary started on the previous line.
                                          deployInitialReal : deployReal).get()
                                .startTime().get(),
                      logger);
    }

    /** Deploys the generated tester application package on the resolved tester platform. */
    private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
        Version platform = testerPlatformVersion(id);
        logger.log("Deploying the tester container on platform " + platform + " ...");
        return deploy(() -> controller.applications().deployTester(id.tester(),
                                                                   testerPackage(id),
                                                                   id.type().zone(controller.system()),
                                                                   platform),
                      controller.jobController().run(id).get()
                                .stepInfo(deployTester).get()
                                .startTime().get(),
                      logger);
    }

    /**
     * Performs the given deployment and logs its prepare response.
     * Known config server error codes are mapped to retries or terminal run statuses; unknown codes are rethrown.
     */
    private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) {
        try {
            PrepareResponse prepareResponse = deployment.get().prepareResponse();
            if (prepareResponse.log != null)
                logger.logAll(prepareResponse.log.stream()
                                                 .map(entry -> new LogEntry(0,
                                                                            Instant.ofEpochMilli(entry.time),
                                                                            LogEntry.typeOf(LogLevel.parse(entry.level)),
                                                                            entry.message))
                                                 .collect(toList()));
            logger.log("Deployment successful.");
            if (prepareResponse.message != null)
                logger.log(prepareResponse.message);
            return Optional.of(running);
        }
        catch (ConfigServerException e) {
            // Retryable errors give an empty result (retry) until the step has been failing for an hour.
            Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                                         ? Optional.of(deploymentFailed) : Optional.empty();
            switch (e.getErrorCode()) {
                case CERTIFICATE_NOT_READY:
                    logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired");
                    if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                        logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")");
                        return Optional.of(RunStatus.endpointCertificateTimeout);
                    }
                    return result;
                case ACTIVATION_CONFLICT:
                case APPLICATION_LOCK_FAILURE:
                    logger.log("Deployment failed with possibly transient error " + e.getErrorCode() + ", will retry: " + e.getMessage());
                    return result;
                case LOAD_BALANCER_NOT_READY:
                case PARENT_HOST_NOT_READY:
                    logger.log(e.getServerMessage());
                    return result;
                case OUT_OF_CAPACITY:
                    logger.log(e.getServerMessage());
                    // CD systems keep retrying capacity shortages for a while; others end the run at once.
                    return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
                           ? Optional.empty() : Optional.of(outOfCapacity);
                case INVALID_APPLICATION_PACKAGE:
                case BAD_REQUEST:
                    logger.log(WARNING, e.getMessage());
                    return Optional.of(deploymentFailed);
            }
            throw e;
        }
        catch (EndpointCertificateException e) {
            switch (e.type()) {
                case CERT_NOT_AVAILABLE:
                    logger.log("Waiting for certificate to become valid: New application, or old one has expired");
                    if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                        logger.log(WARNING, "Controller could not validate certificate within " + timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e));
                        return Optional.of(RunStatus.endpointCertificateTimeout);
                    }
                    return Optional.empty();
                default:
                    throw e;
            }
        }
    }

    private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
        return installReal(id, true, logger);
    }

    private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
        return installReal(id, false, logger);
    }

    /** Polls installation progress of the real application, ending the run on success, failure or timeout. */
    private Optional<RunStatus> installReal(RunId id, boolean setTheStage,
DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), id.application(), Set.of(active)); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), nodes.stream().map(node -> node.parentHostname().get()).collect(toList())); NodeList nodeList = NodeList.of(nodes, parents, services.get()); boolean firstTick = run.convergenceSummary().isEmpty(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } ConvergenceSummary summary = nodeList.summary(); if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); 
return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown())); if ( ! suspendedTooLong.isEmpty()) { failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? 
// NOTE(review): tail of the ternary started on the previous line.
                                        lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null;
            return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary);
        });
        return Optional.empty();
    }

    /** Returns the platform version for the tester: the pinned target platform, or the current system version. */
    private Version testerPlatformVersion(RunId id) {
        return application(id.application()).change().isPinned()
               ? controller.jobController().run(id).get().versions().targetPlatform()
               : controller.readSystemVersion();
    }

    /** Polls installation progress of the tester application, timing out per timeouts.tester(). */
    private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
        Run run = controller.jobController().run(id).get();
        Version platform = testerPlatformVersion(id);
        ZoneId zone = id.type().zone(controller.system());
        ApplicationId testerId = id.tester().id();
        Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone),
                                                                                                               Optional.of(platform));
        if (services.isEmpty()) {
            logger.log("Config status not currently available -- will retry.");
            // Error out if config status is still unavailable five minutes after the step started.
            return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ?
                   Optional.of(error) : Optional.empty();
        }
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                            testerId,
                                                                                            ImmutableSet.of(active, reserved));
        List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone,
                                                                                              nodes.stream().map(node -> node.parentHostname().get()).collect(toList()));
        NodeList nodeList = NodeList.of(nodes, parents, services.get());
        logger.log(nodeList.asList().stream()
                           .flatMap(node -> nodeDetails(node, false))
                           .collect(toList()));
        if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) {
            logger.log("Tester container successfully installed!");
            return Optional.of(running);
        }
        if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) {
            logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!");
            return Optional.of(error);
        }
        return Optional.empty();
    }

    /** Returns true iff all containers in the deployment give 100 consecutive 200 OK responses on /status.html. */
    private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
        var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zoneId)));
        if ( ! endpoints.containsKey(zoneId))
            return false;
        for (var endpoint : endpoints.get(zoneId)) {
            boolean ready = controller.jobController().cloud().ready(endpoint.url());
            if ( ! ready) {
                logger.log("Failed to get 100 consecutive OKs from " + endpoint);
                return false;
            }
        }
        return true;
    }

    /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html.
*/
    private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) {
        DeploymentId deploymentId = new DeploymentId(id, zoneId);
        if (controller.jobController().cloud().testerReady(deploymentId)) {
            return true;
        }
        else {
            logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId);
            return false;
        }
    }

    /** Verifies DNS for all endpoints of the deployment: IP resolution, and CNAME/load-balancer agreement for exclusive routing. */
    private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) {
        var endpoints = controller.routing().zoneEndpointsOf(Set.of(new DeploymentId(id, zone)));
        if ( ! endpoints.containsKey(zone)) {
            logger.log("Endpoints not yet ready.");
            return false;
        }
        var policies = controller.routing().policies().get(new DeploymentId(id, zone));
        for (var endpoint : endpoints.get(zone)) {
            HostName endpointName = HostName.from(endpoint.dnsName());
            var ipAddress = controller.jobController().cloud().resolveHostName(endpointName);
            if (ipAddress.isEmpty()) {
                logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'.");
                return false;
            }
            if (endpoint.routingMethod() == RoutingMethod.exclusive) {
                var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone));
                if (policy == null)
                    throw new IllegalStateException(endpoint + " has no matching policy in " + policies);
                var cNameValue = controller.jobController().cloud().resolveCname(endpointName);
                if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) {
                    logger.log(INFO, "CNAME '" + endpointName + "' points at " +
                                     cNameValue.map(name -> "'" + name + "'").orElse("nothing") +
                                     " but should point at load balancer '" + policy.canonicalName() + "'");
                    return false;
                }
                var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName());
                if ( ! loadBalancerAddress.equals(ipAddress)) {
                    logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" +
                                     policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal");
                    return false;
                }
            }
        }
        logEndpoints(endpoints, logger);
        return true;
    }

    /** Logs all found endpoints, grouped by zone. */
    private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) {
        List<String> messages = new ArrayList<>();
        messages.add("Found endpoints:");
        zoneEndpoints.forEach((zone, endpoints) -> {
            messages.add("- " + zone);
            for (Endpoint endpoint : endpoints)
                messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')");
        });
        logger.log(messages);
    }

    /** Renders human-readable status lines for the node, and for its services when printAllServices or new config is needed. */
    private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) {
        return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")),
                                       "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") +
                                       (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") +
                                       (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") +
                                       (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") +
                                       (node.needsReboot() ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")),
                             node.services().stream()
                                 .filter(service -> printAllServices || node.needsNewConfig())
                                 .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ?
                                                 " has not started " :
                                                 " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration())));
    }

    private String wantedPlatform(Node node) {
        return node.wantedDockerImage().repository() + ":" + node.wantedVersion();
    }

    private String currentPlatform(Node node) {
        String currentRepo = node.currentDockerImage().repository();
        String wantedRepo = node.wantedDockerImage().repository();
        // Only prefix the repository when it differs from the wanted one.
        return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion();
    }

    private String humanize(Node.ServiceState state) {
        switch (state) {
            case allowedDown: return "allowed to be DOWN";
            case expectedUp: return "expected to be UP";
            case permanentlyDown: return "permanently DOWN";
            case unorchestrated: return "unorchestrated";
            default: return state.name();
        }
    }

    /** Starts the tests (or the staging setup, when isSetup) against the endpoints of all relevant deployments. */
    private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) {
        Optional<Deployment> deployment = deployment(id.application(), id.type());
        if (deployment.isEmpty()) {
            logger.log(INFO, "Deployment expired before tests could start.");
            return Optional.of(error);
        }
        var deployments = controller.applications().requireInstance(id.application())
                                    .productionDeployments().keySet().stream()
                                    .map(zone -> new DeploymentId(id.application(), zone))
                                    .collect(Collectors.toSet());
        ZoneId zoneId = id.type().zone(controller.system());
        deployments.add(new DeploymentId(id.application(), zoneId));
        logger.log("Attempting to find endpoints ...");
        var endpoints = controller.routing().zoneEndpointsOf(deployments);
        if ( !
endpoints.containsKey(zoneId)) {
            logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
            return Optional.of(error);
        }
        logEndpoints(endpoints, logger);
        if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) {
            logger.log(WARNING, "Tester container went bad!");
            return Optional.of(error);
        }
        logger.log("Starting tests ...");
        TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup);
        byte[] config = testConfigSerializer.configJson(id.application(),
                                                        id.type(),
                                                        true,
                                                        endpoints,
                                                        controller.applications().reachableContentClustersByZone(deployments));
        controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config);
        return Optional.of(running);
    }

    /** Polls the tester for test progress, ending the run according to the reported status. */
    private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
        if (deployment(id.application(), id.type()).isEmpty()) {
            logger.log(INFO, "Deployment expired before tests could complete.");
            return Optional.of(aborted);
        }
        Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate();
        if (testerCertificate.isPresent()) {
            try {
                testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
            }
            catch (CertificateExpiredException | CertificateNotYetValidException e) {
                logger.log(WARNING, "Tester certificate expired before tests could complete.");
                return Optional.of(aborted);
            }
        }
        controller.jobController().updateTestLog(id);
        TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id));
        switch (testStatus) {
            case NOT_STARTED:
                throw new IllegalStateException("Tester reports tests not started, even though they should have!");
            case RUNNING:
                return Optional.empty();
            case FAILURE:
                logger.log("Tests failed.");
                controller.jobController().updateTestReport(id);
                return Optional.of(testFailure);
            case ERROR:
                logger.log(INFO, "Tester failed running its tests!");
                return Optional.of(error);
            case SUCCESS:
                logger.log("Tests completed successfully.");
                controller.jobController().updateTestReport(id);
                return Optional.of(running);
            default:
                throw new IllegalStateException("Unknown status '" + testStatus + "'!");
        }
    }

    /** Copies the Vespa log from the deployment, if it still exists; a failure to do so ends the run as errored. */
    private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
        if (deployment(id.application(), id.type()).isPresent())
            try {
                controller.jobController().updateVespaLog(id);
            }
            catch (Exception e) {
                logger.log(INFO, "Failure getting vespa logs for " + id, e);
                return Optional.of(error);
            }
        return Optional.of(running);
    }

    /** Deactivates the real application; transient failures are retried until the step has failed for an hour. */
    private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
        try {
            logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
            controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
            return Optional.of(running);
        }
        catch (RuntimeException e) {
            logger.log(WARNING, "Failed deleting application " + id.application(), e);
            Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
            return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                   ? Optional.of(error)
                   : Optional.empty();
        }
    }

    /** Deactivates the tester application; transient failures are retried until the step has failed for an hour. */
    private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
        try {
            logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
            controller.jobController().deactivateTester(id.tester(), id.type());
            return Optional.of(running);
        }
        catch (RuntimeException e) {
            logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
            Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
            return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                   ? Optional.of(error)
                   : Optional.empty();
        }
    }

    /** Updates external notifications (mail, console) for the run; an inconsistent job state ends the run as errored. */
    private Optional<RunStatus> report(RunId id, DualLogger logger) {
        try {
            controller.jobController().active(id).ifPresent(run -> {
                if (run.hasFailed())
                    sendEmailNotification(run, logger);
                updateConsoleNotification(run);
            });
        }
        catch (IllegalStateException e) {
            logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
            return Optional.of(error);
        }
        return Optional.of(running);
    }

    /** Sends a mail with a notification of a failed run, if one should be sent. */
    private void sendEmailNotification(Run run, DualLogger logger) {
        Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
        Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
        // Whether this run is for a new application change; selects which notification trigger applies.
        boolean newCommit = application.require(run.id().application().instance()).change().application()
                                       .map(run.versions().targetApplication()::equals)
                                       .orElse(false);
        When when = newCommit ? failingCommit : failing;
        List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
        if (notifications.emailRolesFor(when).contains(author))
            run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
        if (recipients.isEmpty())
            return;
        try {
            logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients));
            mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
        }
        catch (RuntimeException e) {
            // Best-effort: mail problems must not fail the report step.
            logger.log(WARNING, "Exception trying to send mail for " + run.id(), e);
        }
    }

    /** Returns the mail to send for the given run status, if any. */
    private Optional<Mail> mailOf(Run run, List<String> recipients) {
        switch (run.status()) {
            case running:
            case aborted:
            case success:
                return Optional.empty();
            case outOfCapacity:
                // Only production capacity problems warrant a mail.
                return run.id().type().isProduction() ?
Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false; Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); boolean useTesterCertificate = controller.system().isPublic() && id.type().environment().isTest(); boolean useOsgiBasedTestRuntime = testerPlatformVersion(id).isAfter(new Version(7, 247, 11)); byte[] servicesXml = servicesXml(! 
controller.system().isPublic(), useTesterCertificate, useOsgiBasedTestRuntime, testerResourcesFor(zone, spec.requireInstance(id.application().instance())), controller.controllerConfig().steprunner().testerapp()); byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version); byte[] deploymentXml = deploymentXml(id.tester(), spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) appendAndStoreCertificate(zipBuilder, id); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048); X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." 
+ id.number()); X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair, subject, controller.clock().instant(), controller.clock().instant().plus(timeouts.testerCertificate()), SignatureAlgorithm.SHA512_WITH_RSA, BigInteger.valueOf(1)) .build(); controller.jobController().storeTesterCertificate(id, certificate); zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)); zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8)); } private DeploymentId getTesterDeploymentId(RunId runId) { ZoneId zoneId = runId.type().zone(controller.system()); return new DeploymentId(runId.tester().id(), zoneId); } static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) { NodeResources nodeResources = spec.steps().stream() .filter(step -> step.concerns(zone.environment())) .findFirst() .flatMap(step -> step.zones().get(0).testerFlavor()) .map(NodeResources::fromLegacyName) .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES); return nodeResources.with(NodeResources.DiskSpeed.any); } /** Returns the generated services.xml content for the tester application. 
*/ static byte[] servicesXml( boolean systemUsesAthenz, boolean useTesterCertificate, boolean useOsgiBasedTestRuntime, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = String.format(Locale.ENGLISH, "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String handlerAndExtraComponents = useOsgiBasedTestRuntime ? " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" : " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + 
"</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + handlerAndExtraComponents + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... 
messages) { log(List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { controller.jobController().log(id, step, INFO, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
A bit sneaky: when neither a zone nor a job environment is available, `orElse(true)` defaults to "manually deployed", so the negation makes `isProduction()` return false for such sources. Consider naming this default explicitly so the intent is clear.
/**
 * Returns whether this source refers to a production deployment (or a deployment related to one),
 * or is at tenant or application level.
 */
public boolean isProduction() {
    // Tenant- and application-level sources (no instance) always count as production.
    if (instance.isEmpty()) return true;
    // Prefer the zone's environment, fall back to the job's; with neither present,
    // default to "manually deployed" (and hence not production).
    boolean manuallyDeployed = zoneId.map(ZoneId::environment)
                                     .or(() -> jobType.map(JobType::environment))
                                     .map(Environment::isManuallyDeployed)
                                     .orElse(true);
    return ! manuallyDeployed;
}
.orElse(true);
/**
 * Returns whether this source is from a production deployment, or a deployment related to one
 * (e.g. to a staging zone), or whether it is at tenant or application level.
 * Note: {@code orElse(true)} means "no zone/job environment known" is treated as manually
 * deployed, so such sources are NOT considered production.
 */
public boolean isProduction() { if (instance.isEmpty()) return true; return ! zoneId.map(ZoneId::environment) .or(() -> jobType.map(JobType::environment)) .map(Environment::isManuallyDeployed) .orElse(true); }
class NotificationSource { private final TenantName tenant; private final Optional<ApplicationName> application; private final Optional<InstanceName> instance; private final Optional<ZoneId> zoneId; private final Optional<ClusterSpec.Id> clusterId; private final Optional<JobType> jobType; private final OptionalLong runNumber; public NotificationSource(TenantName tenant, Optional<ApplicationName> application, Optional<InstanceName> instance, Optional<ZoneId> zoneId, Optional<ClusterSpec.Id> clusterId, Optional<JobType> jobType, OptionalLong runNumber) { this.tenant = Objects.requireNonNull(tenant, "tenant cannot be null"); this.application = Objects.requireNonNull(application, "application cannot be null"); this.instance = Objects.requireNonNull(instance, "instance cannot be null"); this.zoneId = Objects.requireNonNull(zoneId, "zoneId cannot be null"); this.clusterId = Objects.requireNonNull(clusterId, "clusterId cannot be null"); this.jobType = Objects.requireNonNull(jobType, "jobType cannot be null"); this.runNumber = Objects.requireNonNull(runNumber, "runNumber cannot be null"); if (instance.isPresent() && application.isEmpty()) throw new IllegalArgumentException("Application name must be present with instance name"); if (zoneId.isPresent() && instance.isEmpty()) throw new IllegalArgumentException("Instance name must be present with zone ID"); if (clusterId.isPresent() && zoneId.isEmpty()) throw new IllegalArgumentException("Zone ID must be present with cluster ID"); if (clusterId.isPresent() && jobType.isPresent()) throw new IllegalArgumentException("Cannot set both cluster ID and job type"); if (jobType.isPresent() && instance.isEmpty()) throw new IllegalArgumentException("Instance name must be present with job type"); if (jobType.isPresent() != runNumber.isPresent()) throw new IllegalArgumentException(String.format("Run number (%s) must be 1-to-1 with job type (%s)", runNumber.isPresent() ? 
"present" : "missing", jobType.map(i -> "present").orElse("missing"))); } public TenantName tenant() { return tenant; } public Optional<ApplicationName> application() { return application; } public Optional<InstanceName> instance() { return instance; } public Optional<ZoneId> zoneId() { return zoneId; } public Optional<ClusterSpec.Id> clusterId() { return clusterId; } public Optional<JobType> jobType() { return jobType; } public OptionalLong runNumber() { return runNumber; } /** * Returns true iff this source contains the given source. A source contains the other source if * all the set fields in this source are equal to the given source, while the fields not set * in this source are ignored. */ public boolean contains(NotificationSource other) { return tenant.equals(other.tenant) && (application.isEmpty() || application.equals(other.application)) && (instance.isEmpty() || instance.equals(other.instance)) && (zoneId.isEmpty() || zoneId.equals(other.zoneId)) && (clusterId.isEmpty() || clusterId.equals(other.clusterId)) && (jobType.isEmpty() || jobType.equals(other.jobType)); } /** * Returns whether this source from a production deployment or deployment related to prod deployment (e.g. 
to * staging zone), or if this is at tenant or application level */ @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NotificationSource that = (NotificationSource) o; return tenant.equals(that.tenant) && application.equals(that.application) && instance.equals(that.instance) && zoneId.equals(that.zoneId) && clusterId.equals(that.clusterId) && jobType.equals(that.jobType) && runNumber.equals(that.runNumber); } @Override public int hashCode() { return Objects.hash(tenant, application, instance, zoneId, clusterId, jobType, runNumber); } @Override public String toString() { return "NotificationSource{" + "tenant=" + tenant + application.map(application -> ", application=" + application.value()).orElse("") + instance.map(instance -> ", instance=" + instance.value()).orElse("") + zoneId.map(zoneId -> ", zone=" + zoneId.value()).orElse("") + clusterId.map(clusterId -> ", clusterId=" + clusterId.value()).orElse("") + jobType.map(jobType -> ", job=" + jobType.jobName() + " '}'; } private static NotificationSource from(TenantName tenant, ApplicationName application, InstanceName instance, ZoneId zoneId, ClusterSpec.Id clusterId, JobType jobType, Long runNumber) { return new NotificationSource(tenant, Optional.ofNullable(application), Optional.ofNullable(instance), Optional.ofNullable(zoneId), Optional.ofNullable(clusterId), Optional.ofNullable(jobType), runNumber == null ? 
OptionalLong.empty() : OptionalLong.of(runNumber)); } public static NotificationSource from(TenantName tenantName) { return from(tenantName, null, null, null, null, null, null); } public static NotificationSource from(TenantAndApplicationId id) { return from(id.tenant(), id.application(), null, null, null, null, null); } public static NotificationSource from(ApplicationId app) { return from(app.tenant(), app.application(), app.instance(), null, null, null, null); } public static NotificationSource from(DeploymentId deploymentId) { ApplicationId app = deploymentId.applicationId(); return from(app.tenant(), app.application(), app.instance(), deploymentId.zoneId(), null, null, null); } public static NotificationSource from(DeploymentId deploymentId, ClusterSpec.Id clusterId) { ApplicationId app = deploymentId.applicationId(); return from(app.tenant(), app.application(), app.instance(), deploymentId.zoneId(), clusterId, null, null); } public static NotificationSource from(RunId runId) { ApplicationId app = runId.application(); return from(app.tenant(), app.application(), app.instance(), null, null, runId.job().type(), runId.number()); } }
class NotificationSource { private final TenantName tenant; private final Optional<ApplicationName> application; private final Optional<InstanceName> instance; private final Optional<ZoneId> zoneId; private final Optional<ClusterSpec.Id> clusterId; private final Optional<JobType> jobType; private final OptionalLong runNumber; public NotificationSource(TenantName tenant, Optional<ApplicationName> application, Optional<InstanceName> instance, Optional<ZoneId> zoneId, Optional<ClusterSpec.Id> clusterId, Optional<JobType> jobType, OptionalLong runNumber) { this.tenant = Objects.requireNonNull(tenant, "tenant cannot be null"); this.application = Objects.requireNonNull(application, "application cannot be null"); this.instance = Objects.requireNonNull(instance, "instance cannot be null"); this.zoneId = Objects.requireNonNull(zoneId, "zoneId cannot be null"); this.clusterId = Objects.requireNonNull(clusterId, "clusterId cannot be null"); this.jobType = Objects.requireNonNull(jobType, "jobType cannot be null"); this.runNumber = Objects.requireNonNull(runNumber, "runNumber cannot be null"); if (instance.isPresent() && application.isEmpty()) throw new IllegalArgumentException("Application name must be present with instance name"); if (zoneId.isPresent() && instance.isEmpty()) throw new IllegalArgumentException("Instance name must be present with zone ID"); if (clusterId.isPresent() && zoneId.isEmpty()) throw new IllegalArgumentException("Zone ID must be present with cluster ID"); if (clusterId.isPresent() && jobType.isPresent()) throw new IllegalArgumentException("Cannot set both cluster ID and job type"); if (jobType.isPresent() && instance.isEmpty()) throw new IllegalArgumentException("Instance name must be present with job type"); if (jobType.isPresent() != runNumber.isPresent()) throw new IllegalArgumentException(String.format("Run number (%s) must be 1-to-1 with job type (%s)", runNumber.isPresent() ? 
"present" : "missing", jobType.map(i -> "present").orElse("missing"))); } public TenantName tenant() { return tenant; } public Optional<ApplicationName> application() { return application; } public Optional<InstanceName> instance() { return instance; } public Optional<ZoneId> zoneId() { return zoneId; } public Optional<ClusterSpec.Id> clusterId() { return clusterId; } public Optional<JobType> jobType() { return jobType; } public OptionalLong runNumber() { return runNumber; } /** * Returns true iff this source contains the given source. A source contains the other source if * all the set fields in this source are equal to the given source, while the fields not set * in this source are ignored. */ public boolean contains(NotificationSource other) { return tenant.equals(other.tenant) && (application.isEmpty() || application.equals(other.application)) && (instance.isEmpty() || instance.equals(other.instance)) && (zoneId.isEmpty() || zoneId.equals(other.zoneId)) && (clusterId.isEmpty() || clusterId.equals(other.clusterId)) && (jobType.isEmpty() || jobType.equals(other.jobType)); } /** * Returns whether this source from a production deployment or deployment related to prod deployment (e.g. 
to * staging zone), or if this is at tenant or application level */ @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NotificationSource that = (NotificationSource) o; return tenant.equals(that.tenant) && application.equals(that.application) && instance.equals(that.instance) && zoneId.equals(that.zoneId) && clusterId.equals(that.clusterId) && jobType.equals(that.jobType) && runNumber.equals(that.runNumber); } @Override public int hashCode() { return Objects.hash(tenant, application, instance, zoneId, clusterId, jobType, runNumber); } @Override public String toString() { return "NotificationSource{" + "tenant=" + tenant + application.map(application -> ", application=" + application.value()).orElse("") + instance.map(instance -> ", instance=" + instance.value()).orElse("") + zoneId.map(zoneId -> ", zone=" + zoneId.value()).orElse("") + clusterId.map(clusterId -> ", clusterId=" + clusterId.value()).orElse("") + jobType.map(jobType -> ", job=" + jobType.jobName() + " '}'; } private static NotificationSource from(TenantName tenant, ApplicationName application, InstanceName instance, ZoneId zoneId, ClusterSpec.Id clusterId, JobType jobType, Long runNumber) { return new NotificationSource(tenant, Optional.ofNullable(application), Optional.ofNullable(instance), Optional.ofNullable(zoneId), Optional.ofNullable(clusterId), Optional.ofNullable(jobType), runNumber == null ? 
OptionalLong.empty() : OptionalLong.of(runNumber)); } public static NotificationSource from(TenantName tenantName) { return from(tenantName, null, null, null, null, null, null); } public static NotificationSource from(TenantAndApplicationId id) { return from(id.tenant(), id.application(), null, null, null, null, null); } public static NotificationSource from(ApplicationId app) { return from(app.tenant(), app.application(), app.instance(), null, null, null, null); } public static NotificationSource from(DeploymentId deploymentId) { ApplicationId app = deploymentId.applicationId(); return from(app.tenant(), app.application(), app.instance(), deploymentId.zoneId(), null, null, null); } public static NotificationSource from(DeploymentId deploymentId, ClusterSpec.Id clusterId) { ApplicationId app = deploymentId.applicationId(); return from(app.tenant(), app.application(), app.instance(), deploymentId.zoneId(), clusterId, null, null); } public static NotificationSource from(RunId runId) { ApplicationId app = runId.application(); return from(app.tenant(), app.application(), app.instance(), null, null, runId.job().type(), runId.number()); } }
Do the tenant-level delete under the lock as well, and return early instead of falling through to the filtering below? (As written it ends up being a no-op for that case, but I had to read the code to see that.)
/**
 * Removes all notifications for the given source, or for sources contained by it.
 * Fix: the tenant-level delete is now performed under the notifications lock, and we return
 * early so the filtering below (a no-op in that case) is skipped.
 */
public void removeNotifications(NotificationSource source) {
    try (Lock lock = curatorDb.lockNotifications(source.tenant())) {
        if (source.application().isEmpty()) { // Tenant-wide source: delete everything for the tenant.
            curatorDb.deleteNotifications(source.tenant());
            return;
        }
        List<Notification> initial = curatorDb.readNotifications(source.tenant());
        List<Notification> filtered = initial.stream()
                                             .filter(notification -> ! source.contains(notification.source()))
                                             .collect(Collectors.toUnmodifiableList());
        if (initial.size() > filtered.size()) // Avoid a write when nothing was removed.
            curatorDb.writeNotifications(source.tenant(), filtered);
    }
}
curatorDb.deleteNotifications(source.tenant());
/** Removes all notifications for the given source, or for sources contained by it. */
public void removeNotifications(NotificationSource source) {
    try (Lock lock = curatorDb.lockNotifications(source.tenant())) {
        // A source without an application is tenant-wide: drop everything for the tenant.
        if (source.application().isEmpty()) {
            curatorDb.deleteNotifications(source.tenant());
            return;
        }
        List<Notification> existing = curatorDb.readNotifications(source.tenant());
        List<Notification> remaining = existing.stream()
                                               .filter(notification -> ! source.contains(notification.source()))
                                               .collect(Collectors.toUnmodifiableList());
        // Only write back when something was actually removed.
        if (remaining.size() < existing.size())
            curatorDb.writeNotifications(source.tenant(), remaining);
    }
}
class NotificationsDb { private final Clock clock; private final CuratorDb curatorDb; public NotificationsDb(Controller controller) { this(controller.clock(), controller.curator()); } NotificationsDb(Clock clock, CuratorDb curatorDb) { this.clock = clock; this.curatorDb = curatorDb; } public List<Notification> listNotifications(NotificationSource source, boolean productionOnly) { return curatorDb.readNotifications(source.tenant()).stream() .filter(notification -> source.contains(notification.source()) && (!productionOnly || notification.source().isProduction())) .collect(Collectors.toUnmodifiableList()); } public void addNotification(NotificationSource source, Notification.Type type, String message) { addNotification(source, type, List.of(message)); } /** * Add a notification with given source and type. If a notification with same source and type * already exists, it'll be replaced by this one instead */ public void addNotification(NotificationSource source, Notification.Type type, List<String> messages) { try (Lock lock = curatorDb.lockNotifications(source.tenant())) { List<Notification> notifications = curatorDb.readNotifications(source.tenant()).stream() .filter(notification -> !source.equals(notification.source()) || type != notification.type()) .collect(Collectors.toCollection(ArrayList::new)); notifications.add(new Notification(clock.instant(), type, source, messages)); curatorDb.writeNotifications(source.tenant(), notifications); } } /** Remove the notification with the given source and type */ public void removeNotification(NotificationSource source, Notification.Type type) { try (Lock lock = curatorDb.lockNotifications(source.tenant())) { List<Notification> initial = curatorDb.readNotifications(source.tenant()); List<Notification> filtered = initial.stream() .filter(notification -> !source.equals(notification.source()) || type != notification.type()) .collect(Collectors.toUnmodifiableList()); if (initial.size() > filtered.size()) 
curatorDb.writeNotifications(source.tenant(), filtered); } } /** Remove all notifications for this source or sources contained by this source */ }
class NotificationsDb { private final Clock clock; private final CuratorDb curatorDb; public NotificationsDb(Controller controller) { this(controller.clock(), controller.curator()); } NotificationsDb(Clock clock, CuratorDb curatorDb) { this.clock = clock; this.curatorDb = curatorDb; } public List<Notification> listNotifications(NotificationSource source, boolean productionOnly) { return curatorDb.readNotifications(source.tenant()).stream() .filter(notification -> source.contains(notification.source()) && (!productionOnly || notification.source().isProduction())) .collect(Collectors.toUnmodifiableList()); } public void setNotification(NotificationSource source, Notification.Type type, String message) { setNotification(source, type, List.of(message)); } /** * Add a notification with given source and type. If a notification with same source and type * already exists, it'll be replaced by this one instead */ public void setNotification(NotificationSource source, Notification.Type type, List<String> messages) { try (Lock lock = curatorDb.lockNotifications(source.tenant())) { List<Notification> notifications = curatorDb.readNotifications(source.tenant()).stream() .filter(notification -> !source.equals(notification.source()) || type != notification.type()) .collect(Collectors.toCollection(ArrayList::new)); notifications.add(new Notification(clock.instant(), type, source, messages)); curatorDb.writeNotifications(source.tenant(), notifications); } } /** Remove the notification with the given source and type */ public void removeNotification(NotificationSource source, Notification.Type type) { try (Lock lock = curatorDb.lockNotifications(source.tenant())) { List<Notification> initial = curatorDb.readNotifications(source.tenant()); List<Notification> filtered = initial.stream() .filter(notification -> !source.equals(notification.source()) || type != notification.type()) .collect(Collectors.toUnmodifiableList()); if (initial.size() > filtered.size()) 
curatorDb.writeNotifications(source.tenant(), filtered); } } /** Remove all notifications for this source or sources contained by this source */ }
I suspect you should extract the flag value in the constructor, so a consistent value is used throughout the deployment — otherwise the flag could be flipped while the config is being retrieved and different callers would see different values.
/**
 * Returns whether disabling mTLS is allowed for this application.
 * NOTE(review): this evaluates the flag on every call, so callers can observe different values if
 * the flag is flipped mid-deployment; consider resolving the value once in the constructor.
 */
public boolean allowDisableMtls() { return allowDisableMtls.value(); }
return allowDisableMtls.value();
/**
 * Returns whether disabling mTLS is allowed for this application. The flag value is resolved
 * once, in the constructor, so every caller sees the same value for this instance.
 */
public boolean allowDisableMtls() { return allowDisableMtls; }
class Properties implements ModelContext.Properties { private final ModelContext.FeatureFlags featureFlags; private final ApplicationId applicationId; private final boolean multitenant; private final List<ConfigServerSpec> configServerSpecs; private final HostName loadBalancerName; private final URI ztsUrl; private final String athenzDnsSuffix; private final boolean hostedVespa; private final Zone zone; private final Set<ContainerEndpoint> endpoints; private final boolean isBootstrap; private final boolean isFirstTimeDeployment; private final Optional<EndpointCertificateSecrets> endpointCertificateSecrets; private final Optional<AthenzDomain> athenzDomain; private final Optional<ApplicationRoles> applicationRoles; private final Quota quota; private final List<TenantSecretStore> tenantSecretStores; private final SecretStore secretStore; private final StringFlag jvmGCOptionsFlag; private final BooleanFlag allowDisableMtls; public Properties(ApplicationId applicationId, ConfigserverConfig configserverConfig, Zone zone, Set<ContainerEndpoint> endpoints, boolean isBootstrap, boolean isFirstTimeDeployment, FlagSource flagSource, Optional<EndpointCertificateSecrets> endpointCertificateSecrets, Optional<AthenzDomain> athenzDomain, Optional<ApplicationRoles> applicationRoles, Optional<Quota> maybeQuota, List<TenantSecretStore> tenantSecretStores, SecretStore secretStore) { this.featureFlags = new FeatureFlags(flagSource, applicationId); this.applicationId = applicationId; this.multitenant = configserverConfig.multitenant() || configserverConfig.hostedVespa() || Boolean.getBoolean("multitenant"); this.configServerSpecs = fromConfig(configserverConfig); this.loadBalancerName = HostName.from(configserverConfig.loadBalancerAddress()); this.ztsUrl = configserverConfig.ztsUrl() != null ? 
URI.create(configserverConfig.ztsUrl()) : null; this.athenzDnsSuffix = configserverConfig.athenzDnsSuffix(); this.hostedVespa = configserverConfig.hostedVespa(); this.zone = zone; this.endpoints = endpoints; this.isBootstrap = isBootstrap; this.isFirstTimeDeployment = isFirstTimeDeployment; this.endpointCertificateSecrets = endpointCertificateSecrets; this.athenzDomain = athenzDomain; this.applicationRoles = applicationRoles; this.quota = maybeQuota.orElseGet(Quota::unlimited); this.tenantSecretStores = tenantSecretStores; this.secretStore = secretStore; this.jvmGCOptionsFlag = PermanentFlags.JVM_GC_OPTIONS.bindTo(flagSource) .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()); this.allowDisableMtls = PermanentFlags.ALLOW_DISABLE_MTLS.bindTo(flagSource) .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()); } @Override public ModelContext.FeatureFlags featureFlags() { return featureFlags; } @Override public boolean multitenant() { return multitenant; } @Override public ApplicationId applicationId() { return applicationId; } @Override public List<ConfigServerSpec> configServerSpecs() { return configServerSpecs; } @Override public HostName loadBalancerName() { return loadBalancerName; } @Override public URI ztsUrl() { return ztsUrl; } @Override public String athenzDnsSuffix() { return athenzDnsSuffix; } @Override public boolean hostedVespa() { return hostedVespa; } @Override public Zone zone() { return zone; } @Override public Set<ContainerEndpoint> endpoints() { return endpoints; } @Override public boolean isBootstrap() { return isBootstrap; } @Override public boolean isFirstTimeDeployment() { return isFirstTimeDeployment; } @Override public Optional<EndpointCertificateSecrets> endpointCertificateSecrets() { return endpointCertificateSecrets; } @Override public Optional<AthenzDomain> athenzDomain() { return athenzDomain; } @Override public Optional<ApplicationRoles> applicationRoles() { return applicationRoles; } 
@Override public Quota quota() { return quota; } @Override public List<TenantSecretStore> tenantSecretStores() { return SecretStoreExternalIdRetriever.populateExternalId(secretStore, applicationId.tenant(), zone.system(), tenantSecretStores); } @Override public String jvmGCOptions(Optional<ClusterSpec.Type> clusterType) { return flagValueForClusterType(jvmGCOptionsFlag, clusterType); } @Override public String flagValueForClusterType(StringFlag flag, Optional<ClusterSpec.Type> clusterType) { return clusterType.map(type -> flag.with(CLUSTER_TYPE, type.name())) .orElse(flag) .value(); } }
class Properties implements ModelContext.Properties { private final ModelContext.FeatureFlags featureFlags; private final ApplicationId applicationId; private final boolean multitenant; private final List<ConfigServerSpec> configServerSpecs; private final HostName loadBalancerName; private final URI ztsUrl; private final String athenzDnsSuffix; private final boolean hostedVespa; private final Zone zone; private final Set<ContainerEndpoint> endpoints; private final boolean isBootstrap; private final boolean isFirstTimeDeployment; private final Optional<EndpointCertificateSecrets> endpointCertificateSecrets; private final Optional<AthenzDomain> athenzDomain; private final Optional<ApplicationRoles> applicationRoles; private final Quota quota; private final List<TenantSecretStore> tenantSecretStores; private final SecretStore secretStore; private final StringFlag jvmGCOptionsFlag; private final boolean allowDisableMtls; public Properties(ApplicationId applicationId, ConfigserverConfig configserverConfig, Zone zone, Set<ContainerEndpoint> endpoints, boolean isBootstrap, boolean isFirstTimeDeployment, FlagSource flagSource, Optional<EndpointCertificateSecrets> endpointCertificateSecrets, Optional<AthenzDomain> athenzDomain, Optional<ApplicationRoles> applicationRoles, Optional<Quota> maybeQuota, List<TenantSecretStore> tenantSecretStores, SecretStore secretStore) { this.featureFlags = new FeatureFlags(flagSource, applicationId); this.applicationId = applicationId; this.multitenant = configserverConfig.multitenant() || configserverConfig.hostedVespa() || Boolean.getBoolean("multitenant"); this.configServerSpecs = fromConfig(configserverConfig); this.loadBalancerName = HostName.from(configserverConfig.loadBalancerAddress()); this.ztsUrl = configserverConfig.ztsUrl() != null ? 
URI.create(configserverConfig.ztsUrl()) : null; this.athenzDnsSuffix = configserverConfig.athenzDnsSuffix(); this.hostedVespa = configserverConfig.hostedVespa(); this.zone = zone; this.endpoints = endpoints; this.isBootstrap = isBootstrap; this.isFirstTimeDeployment = isFirstTimeDeployment; this.endpointCertificateSecrets = endpointCertificateSecrets; this.athenzDomain = athenzDomain; this.applicationRoles = applicationRoles; this.quota = maybeQuota.orElseGet(Quota::unlimited); this.tenantSecretStores = tenantSecretStores; this.secretStore = secretStore; this.jvmGCOptionsFlag = PermanentFlags.JVM_GC_OPTIONS.bindTo(flagSource) .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()); this.allowDisableMtls = PermanentFlags.ALLOW_DISABLE_MTLS.bindTo(flagSource) .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value(); } @Override public ModelContext.FeatureFlags featureFlags() { return featureFlags; } @Override public boolean multitenant() { return multitenant; } @Override public ApplicationId applicationId() { return applicationId; } @Override public List<ConfigServerSpec> configServerSpecs() { return configServerSpecs; } @Override public HostName loadBalancerName() { return loadBalancerName; } @Override public URI ztsUrl() { return ztsUrl; } @Override public String athenzDnsSuffix() { return athenzDnsSuffix; } @Override public boolean hostedVespa() { return hostedVespa; } @Override public Zone zone() { return zone; } @Override public Set<ContainerEndpoint> endpoints() { return endpoints; } @Override public boolean isBootstrap() { return isBootstrap; } @Override public boolean isFirstTimeDeployment() { return isFirstTimeDeployment; } @Override public Optional<EndpointCertificateSecrets> endpointCertificateSecrets() { return endpointCertificateSecrets; } @Override public Optional<AthenzDomain> athenzDomain() { return athenzDomain; } @Override public Optional<ApplicationRoles> applicationRoles() { return applicationRoles; } 
@Override public Quota quota() { return quota; } @Override public List<TenantSecretStore> tenantSecretStores() { return SecretStoreExternalIdRetriever.populateExternalId(secretStore, applicationId.tenant(), zone.system(), tenantSecretStores); } @Override public String jvmGCOptions(Optional<ClusterSpec.Type> clusterType) { return flagValueForClusterType(jvmGCOptionsFlag, clusterType); } @Override public String flagValueForClusterType(StringFlag flag, Optional<ClusterSpec.Type> clusterType) { return clusterType.map(type -> flag.with(CLUSTER_TYPE, type.name())) .orElse(flag) .value(); } }
I noticed, just removed it.
public void close() { synchronized (clusterTable.writeLock) { CairoEngine myEngine = engine.getAndSet(null); if (myEngine != null) { myEngine.close(); } } }
synchronized (clusterTable.writeLock) {
public void close() { if (closed.getAndSet(true)) return; synchronized (nodeTable.writeLock) { synchronized (clusterTable.writeLock) { for (SqlCompiler sqlCompiler : sqlCompilerPool) sqlCompiler.close(); engine.close(); } } }
class QuestMetricsDb extends AbstractComponent implements MetricsDb { private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName()); private final Table nodeTable; private final Table clusterTable; private final Clock clock; private final String dataDir; private final AtomicReference<CairoEngine> engine = new AtomicReference<>(); private final ThreadLocal<SqlCompiler> sqlCompiler; private final AtomicInteger nullRecords = new AtomicInteger(); @Inject public QuestMetricsDb() { this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC()); } public QuestMetricsDb(String dataDir, Clock clock) { this.clock = clock; if (dataDir.startsWith(Defaults.getDefaults().vespaHome()) && ! new File(Defaults.getDefaults().vespaHome()).exists()) dataDir = "data"; String logConfig = dataDir + "/quest-log.conf"; IOUtils.createDirectory(logConfig); IOUtils.writeFile(new File(logConfig), new byte[0]); System.setProperty("out", logConfig); this.dataDir = dataDir; engine.set(new CairoEngine(new DefaultCairoConfiguration(dataDir))); sqlCompiler = ThreadLocal.withInitial(() -> new SqlCompiler(engine.get())); nodeTable = new Table(dataDir, "metrics", clock); clusterTable = new Table(dataDir, "clusterMetrics", clock); ensureTablesExist(); } @Override public Clock clock() { return clock; } @Override public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) { try { addNodeMetricsBody(snapshots); } catch (CairoException e) { if (e.getMessage().contains("Cannot read offset")) { nodeTable.repair(e); addNodeMetricsBody(snapshots); } } } private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) { synchronized (nodeTable.writeLock) { try (TableWriter writer = nodeTable.getWriter()) { for (var snapshot : snapshots) { Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at()); if (atMillis.isEmpty()) continue; TableWriter.Row row = writer.newRow(atMillis.get() * 1000); 
row.putStr(0, snapshot.getFirst()); row.putFloat(2, (float) snapshot.getSecond().load().cpu()); row.putFloat(3, (float) snapshot.getSecond().load().memory()); row.putFloat(4, (float) snapshot.getSecond().load().disk()); row.putLong(5, snapshot.getSecond().generation()); row.putBool(6, snapshot.getSecond().inService()); row.putBool(7, snapshot.getSecond().stable()); row.putFloat(8, (float) snapshot.getSecond().queryRate()); row.append(); } writer.commit(); } } } @Override public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) { try { addClusterMetricsBody(application, snapshots); } catch (CairoException e) { if (e.getMessage().contains("Cannot read offset")) { clusterTable.repair(e); addClusterMetricsBody(application, snapshots); } } } private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) { synchronized (clusterTable.writeLock) { try (TableWriter writer = clusterTable.getWriter()) { for (var snapshot : snapshots.entrySet()) { Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at()); if (atMillis.isEmpty()) continue; TableWriter.Row row = writer.newRow(atMillis.get() * 1000); row.putStr(0, applicationId.serializedForm()); row.putStr(1, snapshot.getKey().value()); row.putFloat(3, (float) snapshot.getValue().queryRate()); row.putFloat(4, (float) snapshot.getValue().writeRate()); row.append(); } writer.commit(); } } } @Override public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) { try { var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext()); return snapshots.entrySet().stream() .map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue())) .collect(Collectors.toList()); } catch (SqlException e) { throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e); } } @Override public ClusterTimeseries 
getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) { try { return getClusterSnapshots(applicationId, clusterId); } catch (SqlException e) { throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e); } } public int getNullRecordsCount() { return nullRecords.get(); } @Override public void gc() { nullRecords.set(0); nodeTable.gc(); clusterTable.gc(); } @Override public void deconstruct() { close(); } @Override private void ensureTablesExist() { if (nodeTable.exists()) ensureNodeTableIsUpdated(); else createNodeTable(); if (clusterTable.exists()) ensureClusterTableIsUpdated(); else createClusterTable(); } private void ensureNodeTableIsUpdated() { try { } catch (Exception e) { nodeTable.repair(e); } } private void ensureClusterTableIsUpdated() { try { if (0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) { } } catch (Exception e) { clusterTable.repair(e); } } private void createNodeTable() { try { issue("create table " + nodeTable.name + " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," + " application_generation long, inService boolean, stable boolean, queries_rate float)" + " timestamp(at)" + "PARTITION BY DAY;", newContext()); } catch (SqlException e) { throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e); } } private void createClusterTable() { try { issue("create table " + clusterTable.name + " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" + " timestamp(at)" + "PARTITION BY DAY;", newContext()); } catch (SqlException e) { throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e); } } private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime, Set<String> hostnames, SqlExecutionContext context) throws SqlException { DateTimeFormatter formatter = 
DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC")); String from = formatter.format(startTime).substring(0, 19) + ".000000Z"; String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z"; String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');"; try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) { ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>(); try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { if (record == null || record.getStr(0) == null) { nullRecords.incrementAndGet(); continue; } String hostname = record.getStr(0).toString(); if (hostnames.isEmpty() || hostnames.contains(hostname)) { snapshots.put(hostname, new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000), new Load(record.getFloat(2), record.getFloat(3), record.getFloat(4)), record.getLong(5), record.getBool(6), record.getBool(7), record.getFloat(8))); } } } return snapshots; } } private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException { String sql = "select * from " + clusterTable.name; var context = newContext(); try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) { List<ClusterMetricSnapshot> snapshots = new ArrayList<>(); try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { String applicationIdString = record.getStr(0).toString(); if ( ! 
application.serializedForm().equals(applicationIdString)) continue; String clusterId = record.getStr(1).toString(); if (cluster.value().equals(clusterId)) { snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000), record.getFloat(3), record.getFloat(4))); } } } return new ClusterTimeseries(cluster, snapshots); } } /** Issues an SQL statement against the QuestDb engine */ private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException { return sqlCompiler.get().compile(sql, context); } private SqlExecutionContext newContext() { return new SqlExecutionContextImpl(engine.get(), 1); } /** A questDb table */ private class Table { private final Object writeLock = new Object(); private final String name; private final Clock clock; private final File dir; private long highestTimestampAdded = 0; Table(String dataDir, String name, Clock clock) { this.name = name; this.clock = clock; this.dir = new File(dataDir, name); IOUtils.createDirectory(dir.getPath()); new File(dir + "/_txn_scoreboard").delete(); } boolean exists() { return 0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), name); } TableWriter getWriter() { return engine.get().getWriter(newContext().getCairoSecurityContext(), name); } void gc() { synchronized (writeLock) { Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4)); SqlExecutionContext context = newContext(); int partitions = 0; try { List<String> removeList = new ArrayList<>(); for (String dirEntry : dir.list()) { File partitionDir = new File(dir, dirEntry); if (!partitionDir.isDirectory()) continue; partitions++; DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC")); Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00")); if (partitionDay.isBefore(oldestToKeep)) removeList.add(dirEntry); } if (removeList.size() < partitions && !removeList.isEmpty()) { issue("alter table " + name + " 
drop partition list " + removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")), context); } } catch (SqlException e) { log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e); } } } /** * Repairs this db on corruption. * * @param e the exception indicating corruption */ private void repair(Exception e) { log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e); IOUtils.recursiveDeleteDir(dir); IOUtils.createDirectory(dir.getPath()); ensureTablesExist(); } void ensureColumnExists(String column, String columnType) throws SqlException { if (columnNames().contains(column)) return; issue("alter table " + name + " add column " + column + " " + columnType, newContext()); } private Optional<Long> adjustOrDiscard(Instant at) { long timestamp = at.toEpochMilli(); if (timestamp >= highestTimestampAdded) { highestTimestampAdded = timestamp; return Optional.of(timestamp); } if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded); return Optional.empty(); } private List<String> columnNames() throws SqlException { var context = newContext(); List<String> columns = new ArrayList<>(); try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) { try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { columns.add(record.getStr(0).toString()); } } } return columns; } } }
class QuestMetricsDb extends AbstractComponent implements MetricsDb { private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName()); private final Table nodeTable; private final Table clusterTable; private final Clock clock; private final String dataDir; private final CairoEngine engine; private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool; private final AtomicBoolean closed = new AtomicBoolean(false); @Inject public QuestMetricsDb() { this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC()); } public QuestMetricsDb(String dataDir, Clock clock) { this.clock = clock; if (dataDir.startsWith(Defaults.getDefaults().vespaHome()) && ! new File(Defaults.getDefaults().vespaHome()).exists()) dataDir = "data"; String logConfig = dataDir + "/quest-log.conf"; IOUtils.createDirectory(logConfig); IOUtils.writeFile(new File(logConfig), new byte[0]); System.setProperty("out", logConfig); this.dataDir = dataDir; engine = new CairoEngine(new DefaultCairoConfiguration(dataDir)); sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine())); nodeTable = new Table(dataDir, "metrics", clock); clusterTable = new Table(dataDir, "clusterMetrics", clock); ensureTablesExist(); } private CairoEngine engine() { if (closed.get()) throw new IllegalStateException("Attempted to access QuestDb after calling close"); return engine; } @Override public Clock clock() { return clock; } @Override public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) { try { addNodeMetricsBody(snapshots); } catch (CairoException e) { if (e.getMessage().contains("Cannot read offset")) { nodeTable.repair(e); addNodeMetricsBody(snapshots); } } } private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) { synchronized (nodeTable.writeLock) { try (TableWriter writer = nodeTable.getWriter()) { for (var snapshot : snapshots) { Optional<Long> atMillis = 
nodeTable.adjustOrDiscard(snapshot.getSecond().at()); if (atMillis.isEmpty()) continue; TableWriter.Row row = writer.newRow(atMillis.get() * 1000); row.putStr(0, snapshot.getFirst()); row.putFloat(2, (float) snapshot.getSecond().load().cpu()); row.putFloat(3, (float) snapshot.getSecond().load().memory()); row.putFloat(4, (float) snapshot.getSecond().load().disk()); row.putLong(5, snapshot.getSecond().generation()); row.putBool(6, snapshot.getSecond().inService()); row.putBool(7, snapshot.getSecond().stable()); row.putFloat(8, (float) snapshot.getSecond().queryRate()); row.append(); } writer.commit(); } } } @Override public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) { try { addClusterMetricsBody(application, snapshots); } catch (CairoException e) { if (e.getMessage().contains("Cannot read offset")) { clusterTable.repair(e); addClusterMetricsBody(application, snapshots); } } } private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) { synchronized (clusterTable.writeLock) { try (TableWriter writer = clusterTable.getWriter()) { for (var snapshot : snapshots.entrySet()) { Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at()); if (atMillis.isEmpty()) continue; TableWriter.Row row = writer.newRow(atMillis.get() * 1000); row.putStr(0, applicationId.serializedForm()); row.putStr(1, snapshot.getKey().value()); row.putFloat(3, (float) snapshot.getValue().queryRate()); row.putFloat(4, (float) snapshot.getValue().writeRate()); row.append(); } writer.commit(); } } } @Override public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) { try { var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext()); return snapshots.entrySet().stream() .map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue())) .collect(Collectors.toList()); } catch (SqlException e) { throw new 
IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e); } } @Override public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) { try { return getClusterSnapshots(applicationId, clusterId); } catch (SqlException e) { throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e); } } @Override public void gc() { nodeTable.gc(); clusterTable.gc(); } @Override public void deconstruct() { close(); } @Override private void ensureTablesExist() { if (nodeTable.exists()) ensureNodeTableIsUpdated(); else createNodeTable(); if (clusterTable.exists()) ensureClusterTableIsUpdated(); else createClusterTable(); } private void ensureNodeTableIsUpdated() { try { } catch (Exception e) { nodeTable.repair(e); } } private void ensureClusterTableIsUpdated() { try { if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) { } } catch (Exception e) { clusterTable.repair(e); } } private void createNodeTable() { try { issue("create table " + nodeTable.name + " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," + " application_generation long, inService boolean, stable boolean, queries_rate float)" + " timestamp(at)" + "PARTITION BY DAY;", newContext()); } catch (SqlException e) { throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e); } } private void createClusterTable() { try { issue("create table " + clusterTable.name + " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" + " timestamp(at)" + "PARTITION BY DAY;", newContext()); } catch (SqlException e) { throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e); } } private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime, Set<String> hostnames, SqlExecutionContext context) throws SqlException { 
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC")); String from = formatter.format(startTime).substring(0, 19) + ".000000Z"; String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z"; String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');"; try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) { ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>(); try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { String hostname = record.getStr(0).toString(); if (hostnames.isEmpty() || hostnames.contains(hostname)) { snapshots.put(hostname, new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000), new Load(record.getFloat(2), record.getFloat(3), record.getFloat(4)), record.getLong(5), record.getBool(6), record.getBool(7), record.getFloat(8))); } } } return snapshots; } } private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException { String sql = "select * from " + clusterTable.name; var context = newContext(); try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) { List<ClusterMetricSnapshot> snapshots = new ArrayList<>(); try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { String applicationIdString = record.getStr(0).toString(); if ( ! 
application.serializedForm().equals(applicationIdString)) continue; String clusterId = record.getStr(1).toString(); if (cluster.value().equals(clusterId)) { snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000), record.getFloat(3), record.getFloat(4))); } } } return new ClusterTimeseries(cluster, snapshots); } } /** Issues an SQL statement against the QuestDb engine */ private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException { SqlCompiler sqlCompiler = sqlCompilerPool.alloc(); try { return sqlCompiler.compile(sql, context); } finally { sqlCompilerPool.free(sqlCompiler); } } private SqlExecutionContext newContext() { return new SqlExecutionContextImpl(engine(), 1); } /** A questDb table */ private class Table { private final Object writeLock = new Object(); private final String name; private final Clock clock; private final File dir; private long highestTimestampAdded = 0; Table(String dataDir, String name, Clock clock) { this.name = name; this.clock = clock; this.dir = new File(dataDir, name); IOUtils.createDirectory(dir.getPath()); new File(dir + "/_txn_scoreboard").delete(); } boolean exists() { return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name); } TableWriter getWriter() { return engine().getWriter(newContext().getCairoSecurityContext(), name); } void gc() { synchronized (writeLock) { Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4)); SqlExecutionContext context = newContext(); int partitions = 0; try { List<String> removeList = new ArrayList<>(); for (String dirEntry : dir.list()) { File partitionDir = new File(dir, dirEntry); if (!partitionDir.isDirectory()) continue; partitions++; DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC")); Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00")); if (partitionDay.isBefore(oldestToKeep)) removeList.add(dirEntry); } if 
(removeList.size() < partitions && !removeList.isEmpty()) { issue("alter table " + name + " drop partition list " + removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")), context); } } catch (SqlException e) { log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e); } } } /** * Repairs this db on corruption. * * @param e the exception indicating corruption */ private void repair(Exception e) { log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e); IOUtils.recursiveDeleteDir(dir); IOUtils.createDirectory(dir.getPath()); ensureTablesExist(); } void ensureColumnExists(String column, String columnType) throws SqlException { if (columnNames().contains(column)) return; issue("alter table " + name + " add column " + column + " " + columnType, newContext()); } private Optional<Long> adjustOrDiscard(Instant at) { long timestamp = at.toEpochMilli(); if (timestamp >= highestTimestampAdded) { highestTimestampAdded = timestamp; return Optional.of(timestamp); } if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded); return Optional.empty(); } private List<String> columnNames() throws SqlException { var context = newContext(); List<String> columns = new ArrayList<>(); try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) { try (RecordCursor cursor = factory.getCursor(context)) { Record record = cursor.getRecord(); while (cursor.hasNext()) { columns.add(record.getStr(0).toString()); } } } return columns; } } }
Could also delegate `awaitTermination` to both parties.
public List<Runnable> shutdownNow() { super.shutdownNow(); return executorService.shutdownNow(); }
}
public List<Runnable> shutdownNow() { super.shutdownNow(); return executorService.shutdownNow(); }
class ZkWatcherExecutorService extends ThreadPoolExecutor { private final ExecutorService executorService = Executors.newCachedThreadPool(new DaemonThreadFactory("zk-session-watcher-")); public ZkWatcherExecutorService() { super(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); } @Override public void execute(Runnable command) { super.execute(() -> executorService.execute(command)); } @Override public void shutdown() { super.shutdown(); executorService.shutdown(); } @Override }
class ZkWatcherExecutorService extends ThreadPoolExecutor { private final ExecutorService executorService = Executors.newCachedThreadPool(new DaemonThreadFactory("zk-session-watcher-")); public ZkWatcherExecutorService() { super(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); } @Override public void execute(Runnable command) { super.execute(() -> executorService.execute(command)); } @Override public void shutdown() { super.shutdown(); executorService.shutdown(); } @Override @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { if ( ! super.awaitTermination(timeout, unit)) return false; return executorService.awaitTermination(timeout, unit); } }
Yup, will add
public List<Runnable> shutdownNow() { super.shutdownNow(); return executorService.shutdownNow(); }
}
public List<Runnable> shutdownNow() { super.shutdownNow(); return executorService.shutdownNow(); }
class ZkWatcherExecutorService extends ThreadPoolExecutor { private final ExecutorService executorService = Executors.newCachedThreadPool(new DaemonThreadFactory("zk-session-watcher-")); public ZkWatcherExecutorService() { super(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); } @Override public void execute(Runnable command) { super.execute(() -> executorService.execute(command)); } @Override public void shutdown() { super.shutdown(); executorService.shutdown(); } @Override }
class ZkWatcherExecutorService extends ThreadPoolExecutor { private final ExecutorService executorService = Executors.newCachedThreadPool(new DaemonThreadFactory("zk-session-watcher-")); public ZkWatcherExecutorService() { super(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); } @Override public void execute(Runnable command) { super.execute(() -> executorService.execute(command)); } @Override public void shutdown() { super.shutdown(); executorService.shutdown(); } @Override @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { if ( ! super.awaitTermination(timeout, unit)) return false; return executorService.awaitTermination(timeout, unit); } }
This should be minimum?
private static Duration computeScalingDuration(Cluster cluster, ClusterSpec clusterSpec) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get())); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if ( clusterSpec.isStateful() ) predictedDuration = minimum(Duration.ofHours(12), predictedDuration); predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration); return predictedDuration; } }
totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get()));
private static Duration computeScalingDuration(Cluster cluster, ClusterSpec clusterSpec) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get())); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if ( clusterSpec.isStateful() ) predictedDuration = minimum(Duration.ofHours(12), predictedDuration); predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration); return predictedDuration; } }
/**
 * A model of a cluster used for autoscaling decisions: combines the cluster's metrics
 * timeseries with application traffic status to compute current and ideal load.
 */
class ClusterModel {

    // Target utilization levels used by idealLoad()/idealCpuLoad().
    static final double idealQueryCpuLoad = 0.8;
    static final double idealWriteCpuLoad = 0.95;
    static final double idealMemoryLoad = 0.7;
    static final double idealDiskLoad = 0.6;

    private final Application application;
    private final Cluster cluster;
    /** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */
    private final NodeList nodes;
    private final MetricsDb metricsDb;
    private final Clock clock;
    private final Duration scalingDuration;

    // Lazily computed, cached on first access (null means "not computed yet").
    private Double queryFractionOfMax = null;
    private Double maxQueryGrowthRate = null;
    private ClusterNodesTimeseries nodeTimeseries = null;
    private ClusterTimeseries clusterTimeseries = null;

    public ClusterModel(Application application,
                        Cluster cluster,
                        ClusterSpec clusterSpec,
                        NodeList clusterNodes,
                        MetricsDb metricsDb,
                        Clock clock) {
        this.application = application;
        this.cluster = cluster;
        this.nodes = clusterNodes;
        this.metricsDb = metricsDb;
        this.clock = clock;
        this.scalingDuration = computeScalingDuration(cluster, clusterSpec);
    }

    /** For testing */
    ClusterModel(Application application,
                 Cluster cluster,
                 Clock clock,
                 Duration scalingDuration,
                 ClusterTimeseries clusterTimeseries) {
        this.application = application;
        this.cluster = cluster;
        this.nodes = null;
        this.metricsDb = null;
        this.clock = clock;
        this.scalingDuration = scalingDuration;
        this.clusterTimeseries = clusterTimeseries;
    }

    /** Returns the predicted duration of a rescaling of this cluster */
    public Duration scalingDuration() { return scalingDuration; }

    // Lazily constructed; NOTE: nodes/metricsDb are null when built with the test constructor.
    public ClusterNodesTimeseries nodeTimeseries() {
        if (nodeTimeseries != null) return nodeTimeseries;
        return nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb);
    }

    public ClusterTimeseries clusterTimeseries() {
        if (clusterTimeseries != null) return clusterTimeseries;
        return clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id());
    }

    /**
     * Returns the predicted max query growth rate per minute as a fraction of the average traffic
     * in the scaling window
     */
    public double maxQueryGrowthRate() {
        if (maxQueryGrowthRate != null) return maxQueryGrowthRate;
        return maxQueryGrowthRate = clusterTimeseries().maxQueryGrowthRate(scalingDuration(), clock);
    }

    /** Returns the average query rate in the scaling window as a fraction of the max observed query rate */
    public double queryFractionOfMax() {
        if (queryFractionOfMax != null) return queryFractionOfMax;
        return queryFractionOfMax = clusterTimeseries().queryFractionOfMax(scalingDuration(), clock);
    }

    public Load averageLoad() { return nodeTimeseries().averageLoad(); }

    public Load idealLoad() { return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad); }

    /** Ideal cpu load must take the application traffic fraction into account */
    private double idealCpuLoad() {
        double queryCpuFraction = queryCpuFraction();

        // Headroom for query growth while a scaling operation is in progress, bounded
        // by the observed fraction of max traffic (plus a small margin) when known.
        double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes();
        if (queryFractionOfMax() != 0)
            growthRateHeadroom = Math.min(growthRateHeadroom, 1 / queryFractionOfMax() + 0.1);

        // Headroom for traffic being shifted to this region: max read share over current
        // read share, with fallbacks of 2.0 (no data) or the 10x cap (currently no reads).
        double maxTrafficShiftHeadroom = 10.0;
        double trafficShiftHeadroom;
        if (application.status().maxReadShare() == 0)
            trafficShiftHeadroom = 2.0;
        else if (application.status().currentReadShare() == 0)
            trafficShiftHeadroom = maxTrafficShiftHeadroom;
        else
            trafficShiftHeadroom = application.status().maxReadShare() / application.status().currentReadShare();
        trafficShiftHeadroom = Math.min(trafficShiftHeadroom, maxTrafficShiftHeadroom);

        // Blend query and write targets, scaling the query part down by both headrooms.
        return queryCpuFraction * 1 / growthRateHeadroom * 1 / trafficShiftHeadroom * idealQueryCpuLoad
               + (1 - queryCpuFraction) * idealWriteCpuLoad;
    }

    /** The estimated fraction of cpu usage which goes to processing queries vs. writes */
    public double queryCpuFraction() {
        OptionalDouble queryRate = clusterTimeseries().queryRate(scalingDuration(), clock);
        OptionalDouble writeRate = clusterTimeseries().writeRate(scalingDuration(), clock);
        // No traffic observed either way: assume an even split.
        if (queryRate.orElse(0) == 0 && writeRate.orElse(0) == 0) return queryCpuFraction(0.5);
        return queryCpuFraction(queryRate.orElse(0) / (queryRate.orElse(0) + writeRate.orElse(0)));
    }

    private double queryCpuFraction(double queryRateFraction) {
        double relativeQueryCost = 9; // one query is taken to cost 9x one write, cpu-wise
        double writeFraction = 1 - queryRateFraction;
        return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction);
    }

    /**
     * Returns duration, FLOORED at smallestAllowed — i.e. never smaller than the first
     * argument. NB: this is not Math.min; the first argument is a lower bound.
     */
    private static Duration minimum(Duration smallestAllowed, Duration duration) {
        if (duration.minus(smallestAllowed).isNegative())
            return smallestAllowed;
        return duration;
    }

    /**
     * Returns duration, CAPPED at largestAllowed — i.e. never larger than the first
     * argument. NB: this is not Math.max; the first argument is an upper bound.
     */
    private static Duration maximum(Duration largestAllowed, Duration duration) {
        if ( ! duration.minus(largestAllowed).isNegative())
            return largestAllowed;
        return duration;
    }

}
class ClusterModel { static final double idealQueryCpuLoad = 0.8; static final double idealWriteCpuLoad = 0.95; static final double idealMemoryLoad = 0.7; static final double idealDiskLoad = 0.6; private final Application application; private final Cluster cluster; /** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */ private final NodeList nodes; private final MetricsDb metricsDb; private final Clock clock; private final Duration scalingDuration; private Double queryFractionOfMax = null; private Double maxQueryGrowthRate = null; private ClusterNodesTimeseries nodeTimeseries = null; private ClusterTimeseries clusterTimeseries = null; public ClusterModel(Application application, Cluster cluster, ClusterSpec clusterSpec, NodeList clusterNodes, MetricsDb metricsDb, Clock clock) { this.application = application; this.cluster = cluster; this.nodes = clusterNodes; this.metricsDb = metricsDb; this.clock = clock; this.scalingDuration = computeScalingDuration(cluster, clusterSpec); } /** For testing */ ClusterModel(Application application, Cluster cluster, Clock clock, Duration scalingDuration, ClusterTimeseries clusterTimeseries) { this.application = application; this.cluster = cluster; this.nodes = null; this.metricsDb = null; this.clock = clock; this.scalingDuration = scalingDuration; this.clusterTimeseries = clusterTimeseries; } /** Returns the predicted duration of a rescaling of this cluster */ public Duration scalingDuration() { return scalingDuration; } public ClusterNodesTimeseries nodeTimeseries() { if (nodeTimeseries != null) return nodeTimeseries; return nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb); } public ClusterTimeseries clusterTimeseries() { if (clusterTimeseries != null) return clusterTimeseries; return clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id()); } /** * Returns the predicted max query growth rate per minute as a fraction of the 
average traffic * in the scaling window */ public double maxQueryGrowthRate() { if (maxQueryGrowthRate != null) return maxQueryGrowthRate; return maxQueryGrowthRate = clusterTimeseries().maxQueryGrowthRate(scalingDuration(), clock); } /** Returns the average query rate in the scaling window as a fraction of the max observed query rate */ public double queryFractionOfMax() { if (queryFractionOfMax != null) return queryFractionOfMax; return queryFractionOfMax = clusterTimeseries().queryFractionOfMax(scalingDuration(), clock); } public Load averageLoad() { return nodeTimeseries().averageLoad(); } public Load idealLoad() { return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad); } /** Ideal cpu load must take the application traffic fraction into account */ private double idealCpuLoad() { double queryCpuFraction = queryCpuFraction(); double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes(); if (queryFractionOfMax() != 0) growthRateHeadroom = Math.min(growthRateHeadroom, 1 / queryFractionOfMax() + 0.1); double maxTrafficShiftHeadroom = 10.0; double trafficShiftHeadroom; if (application.status().maxReadShare() == 0) trafficShiftHeadroom = 2.0; else if (application.status().currentReadShare() == 0) trafficShiftHeadroom = maxTrafficShiftHeadroom; else trafficShiftHeadroom = application.status().maxReadShare() / application.status().currentReadShare(); trafficShiftHeadroom = Math.min(trafficShiftHeadroom, maxTrafficShiftHeadroom); return queryCpuFraction * 1 / growthRateHeadroom * 1 / trafficShiftHeadroom * idealQueryCpuLoad + (1 - queryCpuFraction) * idealWriteCpuLoad; } /** The estimated fraction of cpu usage which goes to processing queries vs. 
writes */ public double queryCpuFraction() { OptionalDouble queryRate = clusterTimeseries().queryRate(scalingDuration(), clock); OptionalDouble writeRate = clusterTimeseries().writeRate(scalingDuration(), clock); if (queryRate.orElse(0) == 0 && writeRate.orElse(0) == 0) return queryCpuFraction(0.5); return queryCpuFraction(queryRate.orElse(0) / (queryRate.orElse(0) + writeRate.orElse(0))); } private double queryCpuFraction(double queryRateFraction) { double relativeQueryCost = 9; double writeFraction = 1 - queryRateFraction; return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction); } private static Duration minimum(Duration smallestAllowed, Duration duration) { if (duration.minus(smallestAllowed).isNegative()) return smallestAllowed; return duration; } private static Duration maximum(Duration largestAllowed, Duration duration) { if ( ! duration.minus(largestAllowed).isNegative()) return largestAllowed; return duration; } }
The intent is that each event duration is capped at a maximum of 4 days. Isn't that clear from the code?
private static Duration computeScalingDuration(Cluster cluster, ClusterSpec clusterSpec) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get())); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if ( clusterSpec.isStateful() ) predictedDuration = minimum(Duration.ofHours(12), predictedDuration); predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration); return predictedDuration; } }
totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get()));
private static Duration computeScalingDuration(Cluster cluster, ClusterSpec clusterSpec) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get())); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if ( clusterSpec.isStateful() ) predictedDuration = minimum(Duration.ofHours(12), predictedDuration); predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration); return predictedDuration; } }
class ClusterModel { static final double idealQueryCpuLoad = 0.8; static final double idealWriteCpuLoad = 0.95; static final double idealMemoryLoad = 0.7; static final double idealDiskLoad = 0.6; private final Application application; private final Cluster cluster; /** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */ private final NodeList nodes; private final MetricsDb metricsDb; private final Clock clock; private final Duration scalingDuration; private Double queryFractionOfMax = null; private Double maxQueryGrowthRate = null; private ClusterNodesTimeseries nodeTimeseries = null; private ClusterTimeseries clusterTimeseries = null; public ClusterModel(Application application, Cluster cluster, ClusterSpec clusterSpec, NodeList clusterNodes, MetricsDb metricsDb, Clock clock) { this.application = application; this.cluster = cluster; this.nodes = clusterNodes; this.metricsDb = metricsDb; this.clock = clock; this.scalingDuration = computeScalingDuration(cluster, clusterSpec); } /** For testing */ ClusterModel(Application application, Cluster cluster, Clock clock, Duration scalingDuration, ClusterTimeseries clusterTimeseries) { this.application = application; this.cluster = cluster; this.nodes = null; this.metricsDb = null; this.clock = clock; this.scalingDuration = scalingDuration; this.clusterTimeseries = clusterTimeseries; } /** Returns the predicted duration of a rescaling of this cluster */ public Duration scalingDuration() { return scalingDuration; } public ClusterNodesTimeseries nodeTimeseries() { if (nodeTimeseries != null) return nodeTimeseries; return nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb); } public ClusterTimeseries clusterTimeseries() { if (clusterTimeseries != null) return clusterTimeseries; return clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id()); } /** * Returns the predicted max query growth rate per minute as a fraction of the 
average traffic * in the scaling window */ public double maxQueryGrowthRate() { if (maxQueryGrowthRate != null) return maxQueryGrowthRate; return maxQueryGrowthRate = clusterTimeseries().maxQueryGrowthRate(scalingDuration(), clock); } /** Returns the average query rate in the scaling window as a fraction of the max observed query rate */ public double queryFractionOfMax() { if (queryFractionOfMax != null) return queryFractionOfMax; return queryFractionOfMax = clusterTimeseries().queryFractionOfMax(scalingDuration(), clock); } public Load averageLoad() { return nodeTimeseries().averageLoad(); } public Load idealLoad() { return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad); } /** Ideal cpu load must take the application traffic fraction into account */ private double idealCpuLoad() { double queryCpuFraction = queryCpuFraction(); double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes(); if (queryFractionOfMax() != 0) growthRateHeadroom = Math.min(growthRateHeadroom, 1 / queryFractionOfMax() + 0.1); double maxTrafficShiftHeadroom = 10.0; double trafficShiftHeadroom; if (application.status().maxReadShare() == 0) trafficShiftHeadroom = 2.0; else if (application.status().currentReadShare() == 0) trafficShiftHeadroom = maxTrafficShiftHeadroom; else trafficShiftHeadroom = application.status().maxReadShare() / application.status().currentReadShare(); trafficShiftHeadroom = Math.min(trafficShiftHeadroom, maxTrafficShiftHeadroom); return queryCpuFraction * 1 / growthRateHeadroom * 1 / trafficShiftHeadroom * idealQueryCpuLoad + (1 - queryCpuFraction) * idealWriteCpuLoad; } /** The estimated fraction of cpu usage which goes to processing queries vs. 
writes */ public double queryCpuFraction() { OptionalDouble queryRate = clusterTimeseries().queryRate(scalingDuration(), clock); OptionalDouble writeRate = clusterTimeseries().writeRate(scalingDuration(), clock); if (queryRate.orElse(0) == 0 && writeRate.orElse(0) == 0) return queryCpuFraction(0.5); return queryCpuFraction(queryRate.orElse(0) / (queryRate.orElse(0) + writeRate.orElse(0))); } private double queryCpuFraction(double queryRateFraction) { double relativeQueryCost = 9; double writeFraction = 1 - queryRateFraction; return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction); } private static Duration minimum(Duration smallestAllowed, Duration duration) { if (duration.minus(smallestAllowed).isNegative()) return smallestAllowed; return duration; } private static Duration maximum(Duration largestAllowed, Duration duration) { if ( ! duration.minus(largestAllowed).isNegative()) return largestAllowed; return duration; } }
class ClusterModel { static final double idealQueryCpuLoad = 0.8; static final double idealWriteCpuLoad = 0.95; static final double idealMemoryLoad = 0.7; static final double idealDiskLoad = 0.6; private final Application application; private final Cluster cluster; /** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */ private final NodeList nodes; private final MetricsDb metricsDb; private final Clock clock; private final Duration scalingDuration; private Double queryFractionOfMax = null; private Double maxQueryGrowthRate = null; private ClusterNodesTimeseries nodeTimeseries = null; private ClusterTimeseries clusterTimeseries = null; public ClusterModel(Application application, Cluster cluster, ClusterSpec clusterSpec, NodeList clusterNodes, MetricsDb metricsDb, Clock clock) { this.application = application; this.cluster = cluster; this.nodes = clusterNodes; this.metricsDb = metricsDb; this.clock = clock; this.scalingDuration = computeScalingDuration(cluster, clusterSpec); } /** For testing */ ClusterModel(Application application, Cluster cluster, Clock clock, Duration scalingDuration, ClusterTimeseries clusterTimeseries) { this.application = application; this.cluster = cluster; this.nodes = null; this.metricsDb = null; this.clock = clock; this.scalingDuration = scalingDuration; this.clusterTimeseries = clusterTimeseries; } /** Returns the predicted duration of a rescaling of this cluster */ public Duration scalingDuration() { return scalingDuration; } public ClusterNodesTimeseries nodeTimeseries() { if (nodeTimeseries != null) return nodeTimeseries; return nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb); } public ClusterTimeseries clusterTimeseries() { if (clusterTimeseries != null) return clusterTimeseries; return clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id()); } /** * Returns the predicted max query growth rate per minute as a fraction of the 
average traffic * in the scaling window */ public double maxQueryGrowthRate() { if (maxQueryGrowthRate != null) return maxQueryGrowthRate; return maxQueryGrowthRate = clusterTimeseries().maxQueryGrowthRate(scalingDuration(), clock); } /** Returns the average query rate in the scaling window as a fraction of the max observed query rate */ public double queryFractionOfMax() { if (queryFractionOfMax != null) return queryFractionOfMax; return queryFractionOfMax = clusterTimeseries().queryFractionOfMax(scalingDuration(), clock); } public Load averageLoad() { return nodeTimeseries().averageLoad(); } public Load idealLoad() { return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad); } /** Ideal cpu load must take the application traffic fraction into account */ private double idealCpuLoad() { double queryCpuFraction = queryCpuFraction(); double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes(); if (queryFractionOfMax() != 0) growthRateHeadroom = Math.min(growthRateHeadroom, 1 / queryFractionOfMax() + 0.1); double maxTrafficShiftHeadroom = 10.0; double trafficShiftHeadroom; if (application.status().maxReadShare() == 0) trafficShiftHeadroom = 2.0; else if (application.status().currentReadShare() == 0) trafficShiftHeadroom = maxTrafficShiftHeadroom; else trafficShiftHeadroom = application.status().maxReadShare() / application.status().currentReadShare(); trafficShiftHeadroom = Math.min(trafficShiftHeadroom, maxTrafficShiftHeadroom); return queryCpuFraction * 1 / growthRateHeadroom * 1 / trafficShiftHeadroom * idealQueryCpuLoad + (1 - queryCpuFraction) * idealWriteCpuLoad; } /** The estimated fraction of cpu usage which goes to processing queries vs. 
writes */ public double queryCpuFraction() { OptionalDouble queryRate = clusterTimeseries().queryRate(scalingDuration(), clock); OptionalDouble writeRate = clusterTimeseries().writeRate(scalingDuration(), clock); if (queryRate.orElse(0) == 0 && writeRate.orElse(0) == 0) return queryCpuFraction(0.5); return queryCpuFraction(queryRate.orElse(0) / (queryRate.orElse(0) + writeRate.orElse(0))); } private double queryCpuFraction(double queryRateFraction) { double relativeQueryCost = 9; double writeFraction = 1 - queryRateFraction; return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction); } private static Duration minimum(Duration smallestAllowed, Duration duration) { if (duration.minus(smallestAllowed).isNegative()) return smallestAllowed; return duration; } private static Duration maximum(Duration largestAllowed, Duration duration) { if ( ! duration.minus(largestAllowed).isNegative()) return largestAllowed; return duration; } }
Uhh, I guess. I thought this worked the same as `Math.max()`, i.e. that it returned the larger of the two arguments, but in fact the result depends on the position of the arguments, and the `duration.minus(...).isNegative()` idiom isn't the easiest thing to read.
private static Duration computeScalingDuration(Cluster cluster, ClusterSpec clusterSpec) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get())); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if ( clusterSpec.isStateful() ) predictedDuration = minimum(Duration.ofHours(12), predictedDuration); predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration); return predictedDuration; } }
totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get()));
private static Duration computeScalingDuration(Cluster cluster, ClusterSpec clusterSpec) { int completedEventCount = 0; Duration totalDuration = Duration.ZERO; for (ScalingEvent event : cluster.scalingEvents()) { if (event.duration().isEmpty()) continue; completedEventCount++; totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get())); } if (completedEventCount == 0) { if (clusterSpec.isStateful()) return Duration.ofHours(12); return Duration.ofMinutes(10); } else { Duration predictedDuration = totalDuration.dividedBy(completedEventCount); if ( clusterSpec.isStateful() ) predictedDuration = minimum(Duration.ofHours(12), predictedDuration); predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration); return predictedDuration; } }
class ClusterModel { static final double idealQueryCpuLoad = 0.8; static final double idealWriteCpuLoad = 0.95; static final double idealMemoryLoad = 0.7; static final double idealDiskLoad = 0.6; private final Application application; private final Cluster cluster; /** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */ private final NodeList nodes; private final MetricsDb metricsDb; private final Clock clock; private final Duration scalingDuration; private Double queryFractionOfMax = null; private Double maxQueryGrowthRate = null; private ClusterNodesTimeseries nodeTimeseries = null; private ClusterTimeseries clusterTimeseries = null; public ClusterModel(Application application, Cluster cluster, ClusterSpec clusterSpec, NodeList clusterNodes, MetricsDb metricsDb, Clock clock) { this.application = application; this.cluster = cluster; this.nodes = clusterNodes; this.metricsDb = metricsDb; this.clock = clock; this.scalingDuration = computeScalingDuration(cluster, clusterSpec); } /** For testing */ ClusterModel(Application application, Cluster cluster, Clock clock, Duration scalingDuration, ClusterTimeseries clusterTimeseries) { this.application = application; this.cluster = cluster; this.nodes = null; this.metricsDb = null; this.clock = clock; this.scalingDuration = scalingDuration; this.clusterTimeseries = clusterTimeseries; } /** Returns the predicted duration of a rescaling of this cluster */ public Duration scalingDuration() { return scalingDuration; } public ClusterNodesTimeseries nodeTimeseries() { if (nodeTimeseries != null) return nodeTimeseries; return nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb); } public ClusterTimeseries clusterTimeseries() { if (clusterTimeseries != null) return clusterTimeseries; return clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id()); } /** * Returns the predicted max query growth rate per minute as a fraction of the 
average traffic * in the scaling window */ public double maxQueryGrowthRate() { if (maxQueryGrowthRate != null) return maxQueryGrowthRate; return maxQueryGrowthRate = clusterTimeseries().maxQueryGrowthRate(scalingDuration(), clock); } /** Returns the average query rate in the scaling window as a fraction of the max observed query rate */ public double queryFractionOfMax() { if (queryFractionOfMax != null) return queryFractionOfMax; return queryFractionOfMax = clusterTimeseries().queryFractionOfMax(scalingDuration(), clock); } public Load averageLoad() { return nodeTimeseries().averageLoad(); } public Load idealLoad() { return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad); } /** Ideal cpu load must take the application traffic fraction into account */ private double idealCpuLoad() { double queryCpuFraction = queryCpuFraction(); double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes(); if (queryFractionOfMax() != 0) growthRateHeadroom = Math.min(growthRateHeadroom, 1 / queryFractionOfMax() + 0.1); double maxTrafficShiftHeadroom = 10.0; double trafficShiftHeadroom; if (application.status().maxReadShare() == 0) trafficShiftHeadroom = 2.0; else if (application.status().currentReadShare() == 0) trafficShiftHeadroom = maxTrafficShiftHeadroom; else trafficShiftHeadroom = application.status().maxReadShare() / application.status().currentReadShare(); trafficShiftHeadroom = Math.min(trafficShiftHeadroom, maxTrafficShiftHeadroom); return queryCpuFraction * 1 / growthRateHeadroom * 1 / trafficShiftHeadroom * idealQueryCpuLoad + (1 - queryCpuFraction) * idealWriteCpuLoad; } /** The estimated fraction of cpu usage which goes to processing queries vs. 
writes */ public double queryCpuFraction() { OptionalDouble queryRate = clusterTimeseries().queryRate(scalingDuration(), clock); OptionalDouble writeRate = clusterTimeseries().writeRate(scalingDuration(), clock); if (queryRate.orElse(0) == 0 && writeRate.orElse(0) == 0) return queryCpuFraction(0.5); return queryCpuFraction(queryRate.orElse(0) / (queryRate.orElse(0) + writeRate.orElse(0))); } private double queryCpuFraction(double queryRateFraction) { double relativeQueryCost = 9; double writeFraction = 1 - queryRateFraction; return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction); } private static Duration minimum(Duration smallestAllowed, Duration duration) { if (duration.minus(smallestAllowed).isNegative()) return smallestAllowed; return duration; } private static Duration maximum(Duration largestAllowed, Duration duration) { if ( ! duration.minus(largestAllowed).isNegative()) return largestAllowed; return duration; } }
class ClusterModel { static final double idealQueryCpuLoad = 0.8; static final double idealWriteCpuLoad = 0.95; static final double idealMemoryLoad = 0.7; static final double idealDiskLoad = 0.6; private final Application application; private final Cluster cluster; /** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */ private final NodeList nodes; private final MetricsDb metricsDb; private final Clock clock; private final Duration scalingDuration; private Double queryFractionOfMax = null; private Double maxQueryGrowthRate = null; private ClusterNodesTimeseries nodeTimeseries = null; private ClusterTimeseries clusterTimeseries = null; public ClusterModel(Application application, Cluster cluster, ClusterSpec clusterSpec, NodeList clusterNodes, MetricsDb metricsDb, Clock clock) { this.application = application; this.cluster = cluster; this.nodes = clusterNodes; this.metricsDb = metricsDb; this.clock = clock; this.scalingDuration = computeScalingDuration(cluster, clusterSpec); } /** For testing */ ClusterModel(Application application, Cluster cluster, Clock clock, Duration scalingDuration, ClusterTimeseries clusterTimeseries) { this.application = application; this.cluster = cluster; this.nodes = null; this.metricsDb = null; this.clock = clock; this.scalingDuration = scalingDuration; this.clusterTimeseries = clusterTimeseries; } /** Returns the predicted duration of a rescaling of this cluster */ public Duration scalingDuration() { return scalingDuration; } public ClusterNodesTimeseries nodeTimeseries() { if (nodeTimeseries != null) return nodeTimeseries; return nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb); } public ClusterTimeseries clusterTimeseries() { if (clusterTimeseries != null) return clusterTimeseries; return clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id()); } /** * Returns the predicted max query growth rate per minute as a fraction of the 
average traffic * in the scaling window */ public double maxQueryGrowthRate() { if (maxQueryGrowthRate != null) return maxQueryGrowthRate; return maxQueryGrowthRate = clusterTimeseries().maxQueryGrowthRate(scalingDuration(), clock); } /** Returns the average query rate in the scaling window as a fraction of the max observed query rate */ public double queryFractionOfMax() { if (queryFractionOfMax != null) return queryFractionOfMax; return queryFractionOfMax = clusterTimeseries().queryFractionOfMax(scalingDuration(), clock); } public Load averageLoad() { return nodeTimeseries().averageLoad(); } public Load idealLoad() { return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad); } /** Ideal cpu load must take the application traffic fraction into account */ private double idealCpuLoad() { double queryCpuFraction = queryCpuFraction(); double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes(); if (queryFractionOfMax() != 0) growthRateHeadroom = Math.min(growthRateHeadroom, 1 / queryFractionOfMax() + 0.1); double maxTrafficShiftHeadroom = 10.0; double trafficShiftHeadroom; if (application.status().maxReadShare() == 0) trafficShiftHeadroom = 2.0; else if (application.status().currentReadShare() == 0) trafficShiftHeadroom = maxTrafficShiftHeadroom; else trafficShiftHeadroom = application.status().maxReadShare() / application.status().currentReadShare(); trafficShiftHeadroom = Math.min(trafficShiftHeadroom, maxTrafficShiftHeadroom); return queryCpuFraction * 1 / growthRateHeadroom * 1 / trafficShiftHeadroom * idealQueryCpuLoad + (1 - queryCpuFraction) * idealWriteCpuLoad; } /** The estimated fraction of cpu usage which goes to processing queries vs. 
writes */ public double queryCpuFraction() { OptionalDouble queryRate = clusterTimeseries().queryRate(scalingDuration(), clock); OptionalDouble writeRate = clusterTimeseries().writeRate(scalingDuration(), clock); if (queryRate.orElse(0) == 0 && writeRate.orElse(0) == 0) return queryCpuFraction(0.5); return queryCpuFraction(queryRate.orElse(0) / (queryRate.orElse(0) + writeRate.orElse(0))); } private double queryCpuFraction(double queryRateFraction) { double relativeQueryCost = 9; double writeFraction = 1 - queryRateFraction; return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction); } private static Duration minimum(Duration smallestAllowed, Duration duration) { if (duration.minus(smallestAllowed).isNegative()) return smallestAllowed; return duration; } private static Duration maximum(Duration largestAllowed, Duration duration) { if ( ! duration.minus(largestAllowed).isNegative()) return largestAllowed; return duration; } }
I'm not fond of manual request routing through regular expressions — it duplicates the binding patterns and is easy to get subtly wrong.
/**
 * Returns whether this request asks for the list of all tenants: the binding match
 * has exactly two groups and the path is the tenant collection (trailing slash optional).
 */
private static boolean isListTenantsRequest(HttpRequest request) {
    if (getBindingMatch(request).groupCount() != 2) return false;
    return request.getUri().getPath().matches("/application/v2/tenant/?");
}
request.getUri().getPath().matches("/application/v2/tenant/?");
private static boolean isListTenantsRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 2 && request.getUri().getPath().matches("/application/v2/tenant/?"); }
class TenantHandler extends HttpHandler { private static final String TENANT_NAME_REGEXP = "[\\w-]+"; private final TenantRepository tenantRepository; private final ApplicationRepository applicationRepository; @Inject public TenantHandler(Context ctx, ApplicationRepository applicationRepository) { super(ctx); this.tenantRepository = applicationRepository.tenantRepository(); this.applicationRepository = applicationRepository; } @Override protected HttpResponse handlePUT(HttpRequest request) { TenantName tenantName = getAndValidateTenantFromRequest(request); try { tenantRepository.addTenant(tenantName); } catch (Exception e) { throw new InternalServerException(Exceptions.toMessageString(e)); } return new TenantCreateResponse(tenantName); } @Override protected HttpResponse handleGET(HttpRequest request) { if (isGetTenantRequest(request)) { final TenantName tenantName = getTenantNameFromRequest(request); Utils.checkThatTenantExists(tenantRepository, tenantName); return new TenantGetResponse(tenantName); } else if (isListTenantsRequest(request)) { return new ListTenantsResponse(ImmutableSet.copyOf(tenantRepository.getAllTenantNames())); } else { throw new BadRequestException(request.getUri().toString()); } } @Override protected HttpResponse handleDELETE(HttpRequest request) { final TenantName tenantName = getTenantNameFromRequest(request); Utils.checkThatTenantExists(tenantRepository, tenantName); applicationRepository.deleteTenant(tenantName); return new TenantDeleteResponse(tenantName); } /** * Gets the tenant name from the request, throws if it exists already and validates its name * * @param request an {@link com.yahoo.container.jdisc.HttpRequest} * @return tenant name */ private TenantName getAndValidateTenantFromRequest(HttpRequest request) { final TenantName tenantName = getTenantNameFromRequest(request); checkThatTenantDoesNotExist(tenantName); validateTenantName(tenantName); return tenantName; } private void validateTenantName(TenantName tenant) { if 
(!tenant.value().matches(TENANT_NAME_REGEXP)) { throw new BadRequestException("Illegal tenant name: " + tenant); } } private void checkThatTenantDoesNotExist(TenantName tenantName) { if (tenantRepository.checkThatTenantExists(tenantName)) throw new BadRequestException("There already exists a tenant '" + tenantName + "'"); } private static BindingMatch<?> getBindingMatch(HttpRequest request) { return HttpConfigRequests.getBindingMatch(request, "http: "http: "http: } private static boolean isGetTenantRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 3; } private static TenantName getTenantNameFromRequest(HttpRequest request) { BindingMatch<?> bm = getBindingMatch(request); return TenantName.from(bm.group(2)); } }
class TenantHandler extends HttpHandler { private static final String TENANT_NAME_REGEXP = "[\\w-]+"; private final TenantRepository tenantRepository; private final ApplicationRepository applicationRepository; @Inject public TenantHandler(Context ctx, ApplicationRepository applicationRepository) { super(ctx); this.tenantRepository = applicationRepository.tenantRepository(); this.applicationRepository = applicationRepository; } @Override protected HttpResponse handlePUT(HttpRequest request) { TenantName tenantName = getAndValidateTenantFromRequest(request); try { tenantRepository.addTenant(tenantName); } catch (Exception e) { throw new InternalServerException(Exceptions.toMessageString(e)); } return new TenantCreateResponse(tenantName); } @Override protected HttpResponse handleGET(HttpRequest request) { if (isGetTenantRequest(request)) { final TenantName tenantName = getTenantNameFromRequest(request); Utils.checkThatTenantExists(tenantRepository, tenantName); return new TenantGetResponse(tenantName); } else if (isListTenantsRequest(request)) { return new ListTenantsResponse(ImmutableSet.copyOf(tenantRepository.getAllTenantNames())); } else { throw new BadRequestException(request.getUri().toString()); } } @Override protected HttpResponse handleDELETE(HttpRequest request) { final TenantName tenantName = getTenantNameFromRequest(request); Utils.checkThatTenantExists(tenantRepository, tenantName); applicationRepository.deleteTenant(tenantName); return new TenantDeleteResponse(tenantName); } /** * Gets the tenant name from the request, throws if it exists already and validates its name * * @param request an {@link com.yahoo.container.jdisc.HttpRequest} * @return tenant name */ private TenantName getAndValidateTenantFromRequest(HttpRequest request) { final TenantName tenantName = getTenantNameFromRequest(request); checkThatTenantDoesNotExist(tenantName); validateTenantName(tenantName); return tenantName; } private void validateTenantName(TenantName tenant) { if 
(!tenant.value().matches(TENANT_NAME_REGEXP)) { throw new BadRequestException("Illegal tenant name: " + tenant); } } private void checkThatTenantDoesNotExist(TenantName tenantName) { if (tenantRepository.checkThatTenantExists(tenantName)) throw new BadRequestException("There already exists a tenant '" + tenantName + "'"); } private static BindingMatch<?> getBindingMatch(HttpRequest request) { return HttpConfigRequests.getBindingMatch(request, "http: "http: "http: } private static boolean isGetTenantRequest(HttpRequest request) { return getBindingMatch(request).groupCount() == 3; } private static TenantName getTenantNameFromRequest(HttpRequest request) { BindingMatch<?> bm = getBindingMatch(request); return TenantName.from(bm.group(2)); } }
consider also testing sum/count
public void testEmptyReduce() { assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").avg().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").max().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").median().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").min().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").prod().asDouble(), delta); }
assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").prod().asDouble(), delta);
public void testEmptyReduce() { assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").avg().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").max().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").median().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").min().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").prod().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").sum().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").count().asDouble(), delta); }
class ReduceTestCase { private static final double delta = 0.00000001; @Test public void testReduce() { assertEquals(1.0, Tensor.from("tensor(x[1])", "[1]").median().asDouble(), delta); assertEquals(1.5, Tensor.from("tensor(x[2])", "[1, 2]").median().asDouble(), delta); assertEquals(3.0, Tensor.from("tensor(x[7])", "[3, 1, 1, 1, 4, 4, 4]").median().asDouble(), delta); assertEquals(2.0, Tensor.from("tensor(x[6])", "[3, 1, 1, 1, 4, 4]").median().asDouble(), delta); assertEquals(2.0, Tensor.from("tensor(x{})", "{{x: foo}: 3, {x:bar}: 1}").median().asDouble(), delta); assertNan(Tensor.Builder.of("tensor(x[3])").cell(Double.NaN, 0).cell(1, 1).cell(2, 2).build().median()); assertNan(Tensor.Builder.of("tensor(x[3])").cell(Double.NaN, 2).cell(1, 1).cell(2, 0).build().median()); assertNan(Tensor.Builder.of("tensor(x[1])").cell(Double.NaN, 0).build().median()); } @Test private void assertNan(Tensor tensor) { assertTrue(tensor + " is NaN", Double.isNaN(tensor.asDouble())); } }
class ReduceTestCase { private static final double delta = 0.00000001; @Test public void testReduce() { assertEquals(1.0, Tensor.from("tensor(x[1])", "[1]").median().asDouble(), delta); assertEquals(1.5, Tensor.from("tensor(x[2])", "[1, 2]").median().asDouble(), delta); assertEquals(3.0, Tensor.from("tensor(x[7])", "[3, 1, 1, 1, 4, 4, 4]").median().asDouble(), delta); assertEquals(2.0, Tensor.from("tensor(x[6])", "[3, 1, 1, 1, 4, 4]").median().asDouble(), delta); assertEquals(2.0, Tensor.from("tensor(x{})", "{{x: foo}: 3, {x:bar}: 1}").median().asDouble(), delta); assertNan(Tensor.Builder.of("tensor(x[3])").cell(Double.NaN, 0).cell(1, 1).cell(2, 2).build().median()); assertNan(Tensor.Builder.of("tensor(x[3])").cell(Double.NaN, 2).cell(1, 1).cell(2, 0).build().median()); assertNan(Tensor.Builder.of("tensor(x[1])").cell(Double.NaN, 0).build().median()); } @Test private void assertNan(Tensor tensor) { assertTrue(tensor + " is NaN", Double.isNaN(tensor.asDouble())); } }
Thanks!
public void testEmptyReduce() { assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").avg().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").max().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").median().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").min().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").prod().asDouble(), delta); }
assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").prod().asDouble(), delta);
public void testEmptyReduce() { assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").avg().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").max().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").median().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").min().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").prod().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").sum().asDouble(), delta); assertEquals(0.0, Tensor.from("tensor(x[3],y{})", "{}").count().asDouble(), delta); }
class ReduceTestCase { private static final double delta = 0.00000001; @Test public void testReduce() { assertEquals(1.0, Tensor.from("tensor(x[1])", "[1]").median().asDouble(), delta); assertEquals(1.5, Tensor.from("tensor(x[2])", "[1, 2]").median().asDouble(), delta); assertEquals(3.0, Tensor.from("tensor(x[7])", "[3, 1, 1, 1, 4, 4, 4]").median().asDouble(), delta); assertEquals(2.0, Tensor.from("tensor(x[6])", "[3, 1, 1, 1, 4, 4]").median().asDouble(), delta); assertEquals(2.0, Tensor.from("tensor(x{})", "{{x: foo}: 3, {x:bar}: 1}").median().asDouble(), delta); assertNan(Tensor.Builder.of("tensor(x[3])").cell(Double.NaN, 0).cell(1, 1).cell(2, 2).build().median()); assertNan(Tensor.Builder.of("tensor(x[3])").cell(Double.NaN, 2).cell(1, 1).cell(2, 0).build().median()); assertNan(Tensor.Builder.of("tensor(x[1])").cell(Double.NaN, 0).build().median()); } @Test private void assertNan(Tensor tensor) { assertTrue(tensor + " is NaN", Double.isNaN(tensor.asDouble())); } }
class ReduceTestCase { private static final double delta = 0.00000001; @Test public void testReduce() { assertEquals(1.0, Tensor.from("tensor(x[1])", "[1]").median().asDouble(), delta); assertEquals(1.5, Tensor.from("tensor(x[2])", "[1, 2]").median().asDouble(), delta); assertEquals(3.0, Tensor.from("tensor(x[7])", "[3, 1, 1, 1, 4, 4, 4]").median().asDouble(), delta); assertEquals(2.0, Tensor.from("tensor(x[6])", "[3, 1, 1, 1, 4, 4]").median().asDouble(), delta); assertEquals(2.0, Tensor.from("tensor(x{})", "{{x: foo}: 3, {x:bar}: 1}").median().asDouble(), delta); assertNan(Tensor.Builder.of("tensor(x[3])").cell(Double.NaN, 0).cell(1, 1).cell(2, 2).build().median()); assertNan(Tensor.Builder.of("tensor(x[3])").cell(Double.NaN, 2).cell(1, 1).cell(2, 0).build().median()); assertNan(Tensor.Builder.of("tensor(x[1])").cell(Double.NaN, 0).build().median()); } @Test private void assertNan(Tensor tensor) { assertTrue(tensor + " is NaN", Double.isNaN(tensor.asDouble())); } }
`(((\(✘෴✘)/)))`
public void run() { log.log(Level.FINE, () -> "Submitting after having waited " + delay + " ms in LATER queue: " + processingTask); processingTask.submit(); }
log.log(Level.FINE, () -> "Submitting after having waited " + delay + " ms in LATER queue: " + processingTask);
public void run() { log.log(Level.FINE, () -> "Submitting after having waited " + delay + " ms in LATER queue: " + processingTask); processingTask.submit(); }
class LaterTimerTask extends TimerTask { private DocumentProcessingTask processingTask; private long delay; private LaterTimerTask(DocumentProcessingTask processingTask, long delay) { this.delay = delay; log.log(Level.FINE, () -> "Enqueueing in " + delay + " ms due to Progress.LATER: " + processingTask); this.processingTask = processingTask; } @Override }
class LaterTimerTask extends TimerTask { private DocumentProcessingTask processingTask; private long delay; private LaterTimerTask(DocumentProcessingTask processingTask, long delay) { this.delay = delay; log.log(Level.FINE, () -> "Enqueueing in " + delay + " ms due to Progress.LATER: " + processingTask); this.processingTask = processingTask; } @Override }
Missing lazy logging and generating of stack trace
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTraceAsString(e); log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; }
log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTraceAsString(e); if (log.isLoggable(Level.FINE)) log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final int configuredIndex; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private 
boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.configuredIndex = options.fleetControllerIndex; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); 
this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, 
stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return isMaster; } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public ClusterStateBundle getClusterStateBundle() { synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. */ public void addSystemStateListener(SystemStateListener listener) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) { throw new NullPointerException("Cluster state should never be null at this point"); } listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged(); if (convergedState != null) { listener.handleStateConvergedInCluster(convergedState); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return 
stateVersionTracker.getVersionedClusterState(); } } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(Level.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(Level.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(databaseContext); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. 
Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) { if (!options.clusterFeedBlockEnabled) { return; } var calc = createResourceExhaustionCalculator(); var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo); var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo); if (!previouslyExhausted.equals(nowExhausted)) { log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted)); stateChangeHandler.setStateChangedFlag(); } } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { 
verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewPublishedState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; systemStateBroadcaster.handleNewClusterStates(stateBundle); if (isMaster) { storeClusterStateMetaDataToZooKeeper(stateBundle); } } private boolean maybePublishOldMetrics() { verifyInControllerThread(); if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) { ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle(); ClusterState baselineState = stateBundle.getBaselineClusterState(); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. 
*/
// NOTE(review): the '*/' above closes a Javadoc comment that starts before this chunk.
// Forwards slobrok fleet data to the master election handler and updates election metrics.
// Must be called on the controller thread (enforced by verifyInControllerThread).
public void handleFleetData(Map<Integer, Integer> data) {
    verifyInControllerThread();
    log.log(Level.FINEST, "Sending fleet data event on to master election handler");
    metricUpdater.updateMasterElectionMetrics(data);
    masterElectionHandler.handleFleetData(data);
}

/**
 * Called when we can no longer contact database.
 * If this node was master, leadership state is dropped and the master metric is cleared,
 * since mastership cannot be asserted without the ZooKeeper connection.
 */
public void lostDatabaseConnection() {
    verifyInControllerThread();
    boolean wasMaster = isMaster;
    masterElectionHandler.lostDatabaseConnection();
    if (wasMaster) {
        // dropLeadershipState() also fails all version-dependent tasks; update the metric afterwards.
        dropLeadershipState();
        metricUpdater.updateMasterState(false);
    }
}

// Fails every task waiting on a state recomputation or on a published-version ACK with
// LEADERSHIP_LOST, then empties both queues. Invoked when this node stops being master.
private void failAllVersionDependentTasks() {
    tasksPendingStateRecompute.forEach(task -> {
        task.handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.notifyCompleted();
    });
    tasksPendingStateRecompute.clear();
    taskCompletionQueue.forEach(task -> {
        task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.getTask().notifyCompleted();
    });
    taskCompletionQueue.clear();
}

/**
 * Called when all distributors have acked newest cluster state version.
 * Notifies the state change handler and records the bundle so listeners are told of
 * convergence on the next propagateNewStatesToListeners() pass.
 */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
    Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
    var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
    log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
    stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context);
    convergedStates.add(currentBundle);
}

// Returns true if the new configured node set differs from the cluster's current one:
// different size, different membership, or any node's retired flag flipped.
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
    if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
    for (ConfiguredNode node : newNodes) {
        // Same index present in both sets; check whether the retired flag changed.
        if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) {
            return true;
        }
    }
    return false;
}

/**
 * This is called when the options field has been set to a new set of options.
 * Pushes the new configuration out to every collaborator (cluster model, database,
 * state gatherer, election handler, RPC/status servers) and resets derived state.
 */
private void propagateOptions() {
    verifyInControllerThread();
    // A changed fleet controller index cannot be applied live; the process exits instead.
    selfTerminateIfConfiguredNodeIndexHasChanged();
    if (changesConfiguredNodeSet(options.nodes)) {
        // Force slobrok node re-registration since the node set changed.
        cluster.setSlobrokGenerationCount(0);
    }
    configuredBucketSpaces = Collections.unmodifiableSet(
            Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
                  .collect(Collectors.toSet()));
    stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
    communicator.propagateOptions(options);
    if (nodeLookup instanceof SlobrokClient) {
        ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    }
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes);
    database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
    stateChangeHandler.reconfigureFromOptions(options);
    stateChangeHandler.setStateChangedFlag();
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty());
    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        // Bind failures are logged but not fatal: another service on this node may now own the port.
        try {
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try {
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }
    long currentTime = timer.getCurrentTimeInMillis();
    // Never push the next broadcast further into the future than it already is.
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
    configGeneration = nextConfigGeneration;
    nextConfigGeneration = -1;
}

// Live reconfiguration of the controller's own index is unsupported; exit so the
// process restarts with the new configuration.
private void selfTerminateIfConfiguredNodeIndexHasChanged() {
    if (options.fleetControllerIndex != configuredIndex) {
        log.warning(String.format("Got new configuration where CC index has changed from %d to %d. We do not support "+
                "doing this live; immediately exiting now to force new configuration",
                configuredIndex, options.fleetControllerIndex));
        prepareShutdownEdge();
        System.exit(1);
    }
}

/**
 * One iteration of the controller's main loop: process ZooKeeper tasks, master election,
 * node state responses, broadcasting, status/RPC requests and remote tasks. Runs almost
 * entirely under the shared monitor; waits on it when no work was done. Each phase is
 * followed by an isRunning() check so shutdown aborts the tick promptly.
 */
public void tick() throws Exception {
    synchronized (monitor) {
        boolean didWork;
        didWork = database.doNextZooKeeperTask(databaseContext);
        didWork |= updateMasterElectionState();
        didWork |= handleLeadershipEdgeTransitions();
        stateChangeHandler.setMaster(isMaster);
        if ( ! isRunning()) { return; }
        didWork |= stateGatherer.processResponses(this);
        if ( ! isRunning()) { return; }
        // Only the first N election candidates gather node state; others step down.
        if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
            didWork |= resyncLocallyCachedState();
        } else {
            stepDownAsStateGatherer();
        }
        if ( ! isRunning()) { return; }
        didWork |= systemStateBroadcaster.processResponses();
        if ( ! isRunning()) { return; }
        if (isMaster) {
            didWork |= broadcastClusterStateToEligibleNodes();
            systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
        }
        if ( ! isRunning()) { return; }
        didWork |= processAnyPendingStatusPageRequest();
        if ( ! isRunning()) { return; }
        if (rpcServer != null) {
            didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
        }
        if ( ! isRunning()) { return; }
        didWork |= processNextQueuedRemoteTask();
        didWork |= completeSatisfiedVersionDependentTasks();
        didWork |= maybePublishOldMetrics();
        processingCycle = false;
        ++cycleCount;
        long tickStopTime = timer.getCurrentTimeInMillis();
        if (tickStopTime >= tickStartTime) {
            metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
        }
        // Idle: block on the monitor until notified or the cycle wait time elapses,
        // unless a test is waiting for cycle completion (waitForCompleteCycle).
        if ( ! didWork && ! waitingForCycle) {
            monitor.wait(options.cycleWaitTime);
        }
        if ( ! isRunning()) { return; }
        tickStartTime = timer.getCurrentTimeInMillis();
        processingCycle = true;
        if (nextOptions != null) {
            switchToNewConfig();
        }
    }
    // Listener callbacks happen outside the monitor to avoid calling alien code under the lock.
    if (isRunning()) {
        propagateNewStatesToListeners();
    }
}

// Polls the master election state. Interruption is rethrown (with cause preserved);
// any other failure is logged and reported as "no work done".
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
    } catch (Exception e) {
        log.log(Level.WARNING, "Failed to watch master election: " + e.toString());
    }
    return false;
}

// Clears gathered node states and logs an event on the edge where this node stops
// being a state gatherer; idempotent once isStateGatherer is false.
private void stepDownAsStateGatherer() {
    if (isStateGatherer) {
        cluster.clearStates();
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    }
    isStateGatherer = false;
}

// Swaps in the pending options and propagates them; failures are logged, not rethrown,
// so a bad config does not kill the tick loop.
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        log.log(Level.SEVERE, "Failed to handle new fleet controller config", e);
    }
}

// Answers at most one pending HTTP status request per tick; returns whether work was done.
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer != null) {
        StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
        if (statusRequest != null) {
            statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
            return true;
        }
    }
    return false;
}

/**
 * Broadcasts a new state bundle and/or state activations when allowed. Broadcasting is
 * held back while ZooKeeper writes are pending, and during the master moratorium until
 * either all nodes have reported in or the moratorium deadline has passed.
 * Returns whether anything was sent.
 */
private boolean broadcastClusterStateToEligibleNodes() {
    // If there's a pending ZooKeeper store of the cluster state, we cannot publish it yet;
    // that would risk distributing a state that fails to be stored durably.
    if (database.hasPendingClusterStateMetaDataStore()) {
        log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
        return false;
    }
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
            && currentTime >= nextStateSendTime) {
        if (inMasterMoratorium) {
            log.info(currentTime < firstAllowedStateBroadcast ?
                    "Master moratorium complete: all nodes have reported in" :
                    "Master moratorium complete: timed out waiting for all nodes to report in");
            firstAllowedStateBroadcast = currentTime;
            inMasterMoratorium = false;
        }
        sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
                databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
        if (sentAny) {
            // Rate-limit: next bundle broadcast no sooner than the configured minimum interval.
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    // Activations are not rate-limited by nextStateSendTime.
    sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
    return sentAny;
}

// Delivers queued published and converged state bundles to all registered listeners,
// then clears the queues. Called outside the main monitor; see tick().
private void propagateNewStatesToListeners() {
    if ( ! newStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : newStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleNewPublishedState(stateBundle);
                }
            }
            newStates.clear();
        }
    }
    if ( ! convergedStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : convergedStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleStateConvergedInCluster(stateBundle);
                }
            }
            convergedStates.clear();
        }
    }
}

/**
 * Processes at most one queued remote (RPC-originated) task per tick. Tasks with a
 * version-ACK dependency are parked in tasksPendingStateRecompute and completed later;
 * all others complete immediately. Returns whether a task was processed.
 */
private boolean processNextQueuedRemoteTask() {
    metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
    RemoteClusterControllerTask task = remoteTasks.poll();
    if (task == null) {
        return false;
    }
    final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
    log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
    task.doRemoteFleetControllerTask(context);
    if (taskMayBeCompletedImmediately(task)) {
        log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
        task.notifyCompleted();
    } else {
        log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
        tasksPendingStateRecompute.add(task);
    }
    return true;
}

// A task may complete now unless it must wait for a version ACK — and even then it completes
// immediately if it already failed or if we are not master (no ACKs will be observed).
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
}

// Builds the context handed to remote tasks: cluster model, current states, a live view
// of master status, and this controller as the change/listener callbacks.
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.masterInfo = new MasterInterface() {
        @Override public boolean isMaster() { return isMaster; }
        @Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
        @Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
    };
    context.nodeStateOrHostInfoChangeHandler = this;
    context.nodeAddedOrRemovedListener = this;
    return context;
}

// With two-phase (deferred) activation the relevant version is the activation ACK;
// otherwise it is the bundle ACK.
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    return bundle.deferredActivation()
            ? nodeInfo.getClusterStateVersionActivationAcked()
            : nodeInfo.getClusterStateVersionBundleAcknowledged();
}

// Lists the nodes whose effective ACKed state version is below the given version.
// Empty when no bundle has been broadcast yet.
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var bundle = systemStateBroadcaster.getClusterStateBundle();
    if (bundle == null) {
        return List.of();
    }
    return cluster.getNodeInfo().stream().
            filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
            map(NodeInfo::getNode).
            collect(Collectors.toList());
}

// Joins list elements with ", ", truncating to 'limit' entries with an "(... and N more)"
// suffix to keep error messages bounded.
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    if (list.size() > limit) {
        var sub = list.subList(0, limit);
        return String.format("%s (... and %d more)",
                sub.stream().map(E::toString).collect(Collectors.joining(", ")),
                list.size() - limit);
    } else {
        return list.stream().map(E::toString).collect(Collectors.joining(", "));
    }
}

// Human-readable detail string for deadline failures; empty when all nodes have converged.
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (nodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
            taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}

/**
 * Completes deferred tasks from the head of the queue whose minimum version has been
 * ACKed cluster-wide, and fails (DEADLINE_EXCEEDED) those whose deadline has passed.
 * Stops at the first task that is neither satisfied nor expired, since the queue is
 * processed in order. Returns whether any task left the queue.
 */
private boolean completeSatisfiedVersionDependentTasks() {
    int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
    long queueSizeBefore = taskCompletionQueue.size();
    final long now = timer.getCurrentTimeInMillis();
    while (!taskCompletionQueue.isEmpty()) {
        VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
        if (publishedVersion >= taskCompletion.getMinimumVersion()) {
            log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                    taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
            var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
            log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                    taskCompletion.getTask().getClass().getName(), details));
            taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else {
            break;
        }
    }
    return (taskCompletionQueue.size() != queueSizeBefore);
}

/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is down: use the latest candidate state (which tracks node events) but keep
    // the published version number so observers see a consistent version.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}

/*
 * System test observations:
 *  - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
 *  - long time before content node state convergence (though this seems to be the case for legacy impl as well)
 */
// Refreshes locally cached cluster state: periodically reloads wanted states/start
// timestamps from ZooKeeper when not master, updates the cluster from slobrok, sends
// node state requests, watches timers and recomputes the cluster state if needed.
// Also handles the edge where this node becomes a state gatherer.
private boolean resyncLocallyCachedState() throws InterruptedException {
    boolean didWork = false;
    // Not master: only refresh ZooKeeper-derived state every 100th cycle to limit load.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = database.loadWantedStates(databaseContext);
        didWork |= database.loadStartTimestamps(cluster);
    }
    didWork |= nodeLookup.updateCluster(cluster, this);
    didWork |= stateGatherer.sendMessages(cluster, communicator, this);
    didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this);
    didWork |= recomputeClusterStateIfRequired();
    if ( ! isStateGatherer) {
        // Edge: just became a state gatherer. The master performs the equivalent
        // bookkeeping in handleLeadershipEdgeTransitions(), hence the !isMaster guard.
        if ( ! isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            stateChangeHandler.setStateChangedFlag();
        }
    }
    isStateGatherer = true;
    return didWork;
}

// Notifies all listeners of a newly computed (not yet published) candidate state.
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
}

// True once the initial broadcast hold-off has elapsed or every node has reported its state.
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
}

/**
 * Recomputes the candidate cluster state when inputs have changed, and promotes it to a
 * new versioned (published) state if it differs enough from the current one or a new
 * version was received from ZooKeeper. Always schedules pending version-dependent tasks
 * against the current version. Returns whether a new state was published.
 */
private boolean recomputeClusterStateIfRequired() {
    boolean stateWasChanged = false;
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag();
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                .bucketSpaces(configuredBucketSpaces)
                .stateDeriver(createBucketSpaceStateDeriver())
                .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                .feedBlock(createResourceExhaustionCalculator()
                        .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
                .deriveAndBuild();
        stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
        invokeCandidateStateListeners(candidateBundle);
        final long timeNowMs = timer.getCurrentTimeInMillis();
        if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
                && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                    || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) {
            final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
            handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
            stateWasChanged = true;
        }
    }
    /*
     * This works transparently for tasks that end up changing the current cluster state (i.e.
     * requiring a new state to be published) and for those whose changes are no-ops (because
     * the changes they request are already part of the current state). In the former case the
     * tasks will depend on the version that was generated based upon them. In the latter case
     * the tasks will depend on the version that is already published (or in the process of
     * being published).
     */
    scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
    return stateWasChanged;
}

// Chooses the per-bucket-space state deriver: merge-aware maintenance handling when the
// cluster has global document types, otherwise a plain clone of the baseline state.
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (options.clusterHasGlobalDocumentTypes) {
        return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                createDefaultSpaceMaintenanceTransitionConstraint());
    } else {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
}

// Builds the feed-block calculator from the current options and the previously inferred
// feed block state (for hysteresis via the noise level).
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    return new ResourceExhaustionCalculator(
            options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
            stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
            options.clusterFeedBlockNoiseLevel);
}

// Deriver that ignores the bucket space and returns a clone of the baseline state.
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
    return (state, space) -> state.clone();
}

// Maintenance transition constraint anchored on the previously published default-space state.
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
    AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
            .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
    return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
}

/**
 * Move tasks that are dependent on the most recently generated state being published into
 * a completion queue with a dependency on the provided version argument. Once that version
 * has been ACKed by all distributors in the system, those tasks will be marked as completed.
 * Each task also gets a deadline after which it is failed (see completeSatisfiedVersionDependentTasks).
 */
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d",
                task.getClass().getName(), completeAtVersion));
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
    }
    tasksPendingStateRecompute.clear();
}

// Generates the current annotated cluster state from options, current time, the cluster
// model and the lowest observed distribution bit count.
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    // NOTE: "currentTimeInMilllis" (triple 'l') is the spelling of the external API method.
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
          .cluster(cluster)
          .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}

// Computes and logs the event diff between two published state bundles, then emits the
// generic "state applied" events for the baseline states.
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
    for (Event event : deltaEvents) {
        eventLog.add(event, isMaster);
    }
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}

// Logs a "new cluster state version" event, plus a distribution-bit-change event when applicable.
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
                    fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() +
                        " to " + toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}

// True only on the edge where we are master, have not yet broadcast any bundle, and the
// first-broadcast time point has been reached. Used to force an initial state computation.
private boolean atFirstClusterStateSendTimeEdge() {
    if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
        return false;
    }
    return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}

// Candidate state must be recomputed if node/config state may have changed, bucket space
// merge completion changed, or we are at the first-broadcast edge.
private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged()
            || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
            || atFirstClusterStateSendTimeEdge();
}

/**
 * Handles the edges of gaining or losing mastership. On gaining: reload state, wanted
 * states and start timestamps from ZooKeeper, bump the version, and enter the master
 * moratorium during which no state is broadcast. While master, persists wanted-state
 * changes. On losing: drops all leadership state. Returns whether work was done.
 */
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            // Edge: just became master.
            stateChangeHandler.setStateChangedFlag();
            systemStateBroadcaster.resetBroadcastedClusterStateBundle();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
            database.loadStartTimestamps(cluster);
            database.loadWantedStates(databaseContext);
            log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
            stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
                    "This node just became fleetcontroller master. Bumped version to " +
                    stateVersionTracker.getCurrentVersion() + " to be in line.",
                    timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            isMaster = true;
            inMasterMoratorium = true;
            log.log(Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be "
                    + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            didWork = true;
        }
        if (wantedStateChanged) {
            database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        dropLeadershipState();
    }
    metricUpdater.updateMasterState(isMaster);
    return didWork;
}

// Resets all master-only state. On the master -> non-master edge this also fails every
// version-dependent task, since their ACKs will never be observed by this node.
private void dropLeadershipState() {
    if (isMaster) {
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
                "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
        firstAllowedStateBroadcast = Long.MAX_VALUE;
        failAllVersionDependentTasks();
    }
    wantedStateChanged = false;
    isMaster = false;
    inMasterMoratorium = false;
}

/**
 * Main controller thread loop: ticks until stopped. InterruptedException means a normal
 * stop; any other throwable is fatal and exits the process. Always runs the shutdown
 * edge (fail pending tasks, wake waiters) on the way out.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while( isRunning() ) {
            tick();
        }
    } catch (InterruptedException e) {
        log.log(Level.FINE, () -> "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        t.printStackTrace();
        log.log(Level.SEVERE, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running.set(false); }
        System.exit(1);
    } finally {
        prepareShutdownEdge();
    }
}

// Marks the controller as stopped, fails all pending version-dependent tasks, and wakes
// everyone waiting on the monitor (e.g. waitForCompleteCycle).
private void prepareShutdownEdge() {
    running.set(false);
    failAllVersionDependentTasks();
    synchronized (monitor) { monitor.notifyAll(); }
}

// Adapter exposing this controller's cluster and listener interfaces to the database layer.
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
    @Override public ContentCluster getCluster() { return cluster; }
    @Override public FleetController getFleetController() { return FleetController.this; }
    @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
    @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};

// Blocks until at least one full tick cycle has completed after this call (two cycles if
// one is currently in progress), or throws IllegalStateException on timeout/shutdown.
// Used by tests to synchronize with the controller thread.
public void waitForCompleteCycle(long timeoutMS) {
    long endTime = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        // If a cycle is in progress we must wait for the NEXT full cycle to complete.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try {
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > endTime)
                    throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if ( !isRunning() )
                    throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                // Poll with a short wait; interrupts are deliberately ignored here.
                try { monitor.wait(100); } catch (InterruptedException e) {}
            }
        } finally {
            waitingForCycle = false;
        }
    }
}

/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 * Blocks until at least nodeCount nodes have ACKed the given state version or higher,
 * or throws IllegalStateException after the timeout (milliseconds).
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            int ackedNodes = 0;
            for (NodeInfo node : cluster.getNodeInfo()) {
                if (node.getClusterStateVersionBundleAcknowledged() >= version) {
                    ++ackedNodes;
                }
            }
            if (ackedNodes >= nodeCount) {
                log.log(Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version
                        + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10);
        }
    }
}

// Test helper: blocks until exactly the given numbers of distributors and storage nodes
// have current (non-outdated) RPC addresses in slobrok, or throws after the timeout.
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                        + distCount + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}

// True while the ZooKeeper-backed database connection is open.
public boolean hasZookeeperConnection() { return !database.isClosed(); }

// Test helper; assumes nodeLookup is a SlobrokClient (will throw ClassCastException otherwise).
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

public ContentCluster getCluster() { return cluster; }

public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

public EventLog getEventLog() { return eventLog; }
}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final int configuredIndex; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private 
boolean isStateGatherer = false;
    private long firstAllowedStateBroadcast = Long.MAX_VALUE;
    private long tickStartTime = Long.MAX_VALUE;

    // Tasks awaiting a state recomputation, and tasks awaiting a specific published version to be ACKed.
    private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
    private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();

    private Set<String> configuredBucketSpaces = Collections.emptySet();

    // Exposes current options, config generation and cluster to status page handlers.
    private final RunDataExtractor dataExtractor = new RunDataExtractor() {
        @Override public FleetControllerOptions getOptions() { return options; }
        @Override public long getConfigGeneration() { return configGeneration; }
        @Override public ContentCluster getCluster() { return cluster; }
    };

    public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) {
        log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
        this.configuredIndex = options.fleetControllerIndex;
        this.timer = timer;
        this.monitor = timer; // the timer instance doubles as the controller-wide lock
        this.eventLog = eventLog;
        this.options = options;
        this.nodeLookup = nodeLookup;
        this.cluster = cluster;
        this.communicator = communicator;
        this.database = database;
        this.stateGatherer = nodeStateGatherer;
        this.stateChangeHandler = stateChangeHandler;
        this.systemStateBroadcaster = systemStateBroadcaster;
        this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
        this.metricUpdater = metricUpdater;
        this.statusPageServer = statusPage;
        this.rpcServer = server;
        this.masterElectionHandler = masterElectionHandler;
        // Status page routing: per-node page, health, raw cluster state and the index page.
        this.statusRequestRouter.addHandler(
                "^/node=([a-z]+)\\.(\\d+)$",
                new LegacyNodePageRequestHandler(timer, eventLog, cluster));
        this.statusRequestRouter.addHandler(
                "^/state.*",
                new NodeHealthRequestHandler(dataExtractor));
        this.statusRequestRouter.addHandler(
                "^/clusterstate",
                new ClusterStateRequestHandler(stateVersionTracker));
        this.statusRequestRouter.addHandler(
                "^/$",
                new LegacyIndexPageRequestHandler(
                        timer, options.showLocalSystemStatesInEventLog, cluster,
                        masterElectionHandler, stateVersionTracker, eventLog,
                        timer.getCurrentTimeInMillis(), dataExtractor));

        propagateOptions();
    }

    /** Factory: wires up all collaborators with production implementations and starts the controller thread. */
    public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception {
        Timer timer = new RealTimer();
        MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName);
        EventLog log = new EventLog(timer, metricUpdater);
        ContentCluster cluster = new ContentCluster(
                options.clusterName,
                options.nodes,
                options.storageDistribution);
        NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
        Communicator communicator = new RPCCommunicator(
                RPCCommunicator.createRealSupervisor(),
                timer,
                options.fleetControllerIndex,
                options.nodeStateRequestTimeoutMS,
                options.nodeStateRequestTimeoutEarliestPercentage,
                options.nodeStateRequestTimeoutLatestPercentage,
                options.nodeStateRequestRoundTripTimeMaxSeconds);
        DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
        NodeLookup lookUp = new SlobrokClient(timer);
        StateChangeHandler stateGenerator = new StateChangeHandler(timer, log);
        SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
        MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
        // Note: no RpcServer instance is passed here (null).
        FleetController controller = new FleetController(
                timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator,
                stateBroadcaster, masterElectionHandler, metricUpdater, options);
        controller.start();
        return controller;
    }

    public void start() {
        runner = new Thread(this);
        runner.start();
    }

    public Object getMonitor() { return monitor; }

    public boolean isRunning() { return running.get(); }

    public boolean isMaster() { synchronized (monitor) { return isMaster; } }

    public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } }

    public ClusterStateBundle getClusterStateBundle() { synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); } }

    /** Queues a task for later execution in the controller thread (RemoteClusterControllerTaskScheduler). */
    public void schedule(RemoteClusterControllerTask task) {
        synchronized (monitor) {
            log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
            remoteTasks.add(task);
        }
    }

    /** Used for unit testing. */
    public void addSystemStateListener(SystemStateListener listener) {
        systemStateListeners.add(listener);
        // Immediately push the current published (and, if any, converged) state to the new listener.
        com.yahoo.vdslib.state.ClusterState state = getSystemState();
        if (state == null) {
            throw new NullPointerException("Cluster state should never be null at this point");
        }
        listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
        ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
        if (convergedState != null) {
            listener.handleStateConvergedInCluster(convergedState);
        }
    }

    public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } }

    public NodeState getReportedNodeState(Node n) {
        synchronized(monitor) {
            NodeInfo node = cluster.getNodeInfo(n);
            if (node == null) {
                throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
            }
            return node.getReportedState();
        }
    }

    public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } }

    public com.yahoo.vdslib.state.ClusterState getSystemState() {
        synchronized(monitor) {
            return
stateVersionTracker.getVersionedClusterState(); } }

    public int getRpcPort() { return rpcServer.getPort(); }

    /** Stops the controller thread (if running) and shuts down all subsystems. */
    public void shutdown() throws InterruptedException, java.io.IOException {
        if (runner != null && isRunning()) {
            log.log(Level.INFO, "Joining event thread.");
            running.set(false);
            synchronized(monitor) { monitor.notifyAll(); }
            runner.join();
        }
        log.log(Level.INFO, "Fleetcontroller done shutting down event thread.");
        // Claim the controller-thread id so the remaining teardown passes verifyInControllerThread().
        controllerThreadId = Thread.currentThread().getId();
        database.shutdown(databaseContext);
        if (statusPageServer != null) {
            statusPageServer.shutdown();
        }
        if (rpcServer != null) {
            rpcServer.shutdown();
        }
        communicator.shutdown();
        nodeLookup.shutdown();
    }

    /** Stages new options; they are picked up and applied by the controller thread in tick(). */
    public void updateOptions(FleetControllerOptions options, long configGeneration) {
        synchronized(monitor) {
            assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
            log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
            nextOptions = options.clone();
            nextConfigGeneration = configGeneration;
            monitor.notifyAll();
        }
    }

    /** Throws if called from any thread other than the controller thread (once its id is known). */
    private void verifyInControllerThread() {
        if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
            throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
        }
    }

    private ClusterState latestCandidateClusterState() {
        return stateVersionTracker.getLatestCandidateState().getClusterState();
    }

    @Override
    public void handleNewNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
    }

    @Override
    public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        wantedStateChanged = true;
        stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
    }

    @Override
    public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
        verifyInControllerThread();
        triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
        stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
    }

    /** Flags a state recomputation if the node's set of exhausted resources changed with this host info. */
    private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
        if (!options.clusterFeedBlockEnabled) {
            return;
        }
        var calc = createResourceExhaustionCalculator();
        // Compare the node's resource exhaustion set before and after the host info update.
        var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
        var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
        if (!previouslyExhausted.equals(nowExhausted)) {
            log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted));
            stateChangeHandler.setStateChangedFlag();
        }
    }

    @Override
    public void handleNewNode(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleNewNode(node);
    }

    @Override
    public void handleMissingNode(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
    }

    @Override
    public void handleNewRpcAddress(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleNewRpcAddress(node);
    }

    @Override
    public void handleReturnedRpcAddress(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleReturnedRpcAddress(node);
    }

    @Override
    public void handleNewPublishedState(ClusterStateBundle stateBundle) {
        verifyInControllerThread();
        ClusterState baselineState = stateBundle.getBaselineClusterState();
        newStates.add(stateBundle);
        metricUpdater.updateClusterStateMetrics(cluster, baselineState,
                ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
        lastMetricUpdateCycleCount = cycleCount;
        systemStateBroadcaster.handleNewClusterStates(stateBundle);
        // Only the master persists state bundle metadata to ZooKeeper.
        if (isMaster) {
            storeClusterStateMetaDataToZooKeeper(stateBundle);
        }
    }

    /** Re-publishes cluster state metrics if none have been published within the last 300 cycles. */
    private boolean maybePublishOldMetrics() {
        verifyInControllerThread();
        if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) {
            ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
            ClusterState baselineState = stateBundle.getBaselineClusterState();
            metricUpdater.updateClusterStateMetrics(cluster, baselineState,
                    ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
            lastMetricUpdateCycleCount = cycleCount;
            return true;
        } else {
            return false;
        }
    }

    private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
        try {
            database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
            database.saveLatestClusterStateBundle(databaseContext, stateBundle);
        } catch (InterruptedException e) {
            throw new RuntimeException("ZooKeeper write interrupted", e);
        }
    }

    /**
     * This function gives data of the current state in master election.
     * The keys in the given map are indexes of fleet controllers.
     * The values are what fleetcontroller that fleetcontroller wants to
     * become master.
     *
     * If more than half the fleetcontrollers want a node to be master and
     * that node also wants itself as master, that node is the single master.
     * If this condition is not met, there is currently no master.
     */
    public void handleFleetData(Map<Integer, Integer> data) {
        verifyInControllerThread();
        log.log(Level.FINEST, "Sending fleet data event on to master election handler");
        metricUpdater.updateMasterElectionMetrics(data);
        masterElectionHandler.handleFleetData(data);
    }

    /**
     * Called when we can no longer contact database.
     */
    public void lostDatabaseConnection() {
        verifyInControllerThread();
        boolean wasMaster = isMaster;
        masterElectionHandler.lostDatabaseConnection();
        if (wasMaster) {
            // Step down as master when the database connection is lost.
            dropLeadershipState();
            metricUpdater.updateMasterState(false);
        }
    }

    /** Fails all pending and queued version-dependent tasks with LEADERSHIP_LOST. */
    private void failAllVersionDependentTasks() {
        tasksPendingStateRecompute.forEach(task -> {
            task.handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
            task.notifyCompleted();
        });
        tasksPendingStateRecompute.clear();
        taskCompletionQueue.forEach(task -> {
            task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
            task.getTask().notifyCompleted();
        });
        taskCompletionQueue.clear();
    }

    /** Called when all distributors have acked newest cluster state version. */
    public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
        Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
        var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
        log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
        stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context);
        convergedStates.add(currentBundle);
    }

    /** Returns true if the new node set differs from the currently configured one (membership or retired flag). */
    private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
        if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
        if (!
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
        for (ConfiguredNode node : newNodes) {
            if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) {
                return true;
            }
        }
        return false;
    }

    /** This is called when the options field has been set to a new set of options */
    private void propagateOptions() {
        verifyInControllerThread();
        selfTerminateIfConfiguredNodeIndexHasChanged();

        if (changesConfiguredNodeSet(options.nodes)) {
            // Node set changed: reset slobrok generation count — presumably forces a re-fetch of
            // node registrations. TODO confirm against SlobrokClient.
            cluster.setSlobrokGenerationCount(0);
        }
        configuredBucketSpaces = Collections.unmodifiableSet(
                Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
                        .collect(Collectors.toSet()));
        stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);

        communicator.propagateOptions(options);

        if (nodeLookup instanceof SlobrokClient) {
            ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
        }
        eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
        cluster.setPollingFrequency(options.statePollingFrequency);
        cluster.setDistribution(options.storageDistribution);
        cluster.setNodes(options.nodes);
        database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext);
        database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext);
        stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
        stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
        stateChangeHandler.reconfigureFromOptions(options);
        stateChangeHandler.setStateChangedFlag();
        masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
        masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
        masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty());
        if (rpcServer != null) {
            rpcServer.setMasterElectionHandler(masterElectionHandler);
            try{
                rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
            } catch (ListenFailedException e) {
                log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            } catch (Exception e) {
                log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
            }
        }
        if (statusPageServer != null) {
            try{
                statusPageServer.setPort(options.httpPort);
            } catch (Exception e) {
                log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            }
        }
        long currentTime = timer.getCurrentTimeInMillis();
        nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
        configGeneration = nextConfigGeneration;
        nextConfigGeneration = -1;
    }

    /** Exits the process if this controller's configured index has changed; live index changes are unsupported. */
    private void selfTerminateIfConfiguredNodeIndexHasChanged() {
        if (options.fleetControllerIndex != configuredIndex) {
            log.warning(String.format("Got new configuration where CC index has changed from %d to %d. We do not support "+
                    "doing this live; immediately exiting now to force new configuration", configuredIndex, options.fleetControllerIndex));
            prepareShutdownEdge();
            System.exit(1);
        }
    }

    /** One iteration of the controller's main loop. Holds the monitor for the entire cycle. */
    public void tick() throws Exception {
        synchronized (monitor) {
            boolean didWork;
            didWork = database.doNextZooKeeperTask(databaseContext);
            didWork |= updateMasterElectionState();
            didWork |= handleLeadershipEdgeTransitions();
            stateChangeHandler.setMaster(isMaster);
            if ( ! isRunning()) { return; }
            didWork |= stateGatherer.processResponses(this);
            if ( ! isRunning()) { return; }
            // Only the first stateGatherCount election candidates gather node states.
            if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
                didWork |= resyncLocallyCachedState();
            } else {
                stepDownAsStateGatherer();
            }
            if ( ! isRunning()) { return; }
            didWork |= systemStateBroadcaster.processResponses();
            if ( ! isRunning()) { return; }
            if (isMaster) {
                didWork |= broadcastClusterStateToEligibleNodes();
                systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
            }
            if ( ! isRunning()) { return; }
            didWork |= processAnyPendingStatusPageRequest();
            if ( ! isRunning()) { return; }
            if (rpcServer != null) {
                didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
            }
            if ( ! isRunning()) { return; }
            didWork |= processNextQueuedRemoteTask();
            didWork |= completeSatisfiedVersionDependentTasks();
            didWork |= maybePublishOldMetrics();

            processingCycle = false;
            ++cycleCount;
            long tickStopTime = timer.getCurrentTimeInMillis();
            if (tickStopTime >= tickStartTime) {
                metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
            }
            // Idle-wait only when nothing happened and no test is blocked in waitForCompleteCycle().
            if ( ! didWork && ! waitingForCycle) {
                monitor.wait(options.cycleWaitTime);
            }
            if ( ! isRunning()) { return; }
            tickStartTime = timer.getCurrentTimeInMillis();
            processingCycle = true;
            if (nextOptions != null) {
                switchToNewConfig();
            }
        }
        // Listener callbacks happen outside the monitor.
        if (isRunning()) {
            propagateNewStatesToListeners();
        }
    }

    private boolean updateMasterElectionState() throws InterruptedException {
        try {
            return masterElectionHandler.watchMasterElection(database, databaseContext);
        } catch (InterruptedException e) {
            // Re-throw interruption, preserving the original stack trace as the cause.
            throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed to watch master election: " + e.toString());
        }
        return false;
    }

    private void stepDownAsStateGatherer() {
        if (isStateGatherer) {
            // Drop locally cached node states; this node no longer gathers them.
            cluster.clearStates();
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
        }
        isStateGatherer = false;
    }

    /** Applies options staged by updateOptions(). */
    private void switchToNewConfig() {
        options = nextOptions;
        nextOptions = null;
        try {
            propagateOptions();
        } catch (Exception e) {
            log.log(Level.SEVERE, "Failed to handle new fleet controller config", e);
        }
    }

    private boolean processAnyPendingStatusPageRequest() {
        if (statusPageServer != null) {
            StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
            if (statusRequest != null) {
                statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
                return true;
            }
        }
        return false;
    }

    /** Master only: broadcasts the current state bundle and/or state activations when allowed and due. */
    private boolean broadcastClusterStateToEligibleNodes() {
        // Defer broadcasting while a bundle write to ZooKeeper is still pending.
        if (database.hasPendingClusterStateMetaDataStore()) {
            log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
            return false;
        }
        boolean sentAny = false;
        long currentTime = timer.getCurrentTimeInMillis();
        // Broadcast only after the moratorium has passed (or all nodes reported in) and the
        // rate-limit window since the last state send has elapsed.
        if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
                && currentTime >= nextStateSendTime) {
            if (inMasterMoratorium) {
                log.info(currentTime < firstAllowedStateBroadcast ?
                        "Master moratorium complete: all nodes have reported in" :
                        "Master moratorium complete: timed out waiting for all nodes to report in");
                firstAllowedStateBroadcast = currentTime;
                inMasterMoratorium = false;
            }
            sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
                    databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
            if (sentAny) {
                // Rate-limit consecutive new cluster states.
                nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
            }
        }
        sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
        return sentAny;
    }

    /** Drains newStates/convergedStates to registered listeners (called outside the monitor in tick()). */
    private void propagateNewStatesToListeners() {
        if ( ! newStates.isEmpty()) {
            synchronized (systemStateListeners) {
                for (ClusterStateBundle stateBundle : newStates) {
                    for (SystemStateListener listener : systemStateListeners) {
                        listener.handleNewPublishedState(stateBundle);
                    }
                }
                newStates.clear();
            }
        }
        if ( !
convergedStates.isEmpty()) {
            synchronized (systemStateListeners) {
                for (ClusterStateBundle stateBundle : convergedStates) {
                    for (SystemStateListener listener : systemStateListeners) {
                        listener.handleStateConvergedInCluster(stateBundle);
                    }
                }
                convergedStates.clear();
            }
        }
    }

    /** Polls and runs at most one queued remote task; version-dependent tasks are parked for later completion. */
    private boolean processNextQueuedRemoteTask() {
        metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
        RemoteClusterControllerTask task = remoteTasks.poll();
        if (task == null) {
            return false;
        }
        final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
        log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
        task.doRemoteFleetControllerTask(context);
        if (taskMayBeCompletedImmediately(task)) {
            log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
            task.notifyCompleted();
        } else {
            log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
            tasksPendingStateRecompute.add(task);
        }
        return true;
    }

    private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
        // Version-barrier tasks are only deferred when this node is master and the task succeeded.
        return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
    }

    private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
        final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
        context.cluster = cluster;
        context.currentConsolidatedState = consolidatedClusterState();
        context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
        context.masterInfo = new MasterInterface() {
            @Override public boolean isMaster() { return isMaster; }
            @Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
            @Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
        };
        context.nodeStateOrHostInfoChangeHandler = this;
        context.nodeAddedOrRemovedListener = this;
        return context;
    }

    /** Returns the highest state version the node has effectively applied, honoring deferred (two-phase) activation. */
    private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
        return bundle.deferredActivation()
                ? nodeInfo.getClusterStateVersionActivationAcked()
                : nodeInfo.getClusterStateVersionBundleAcknowledged();
    }

    private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
        var bundle = systemStateBroadcaster.getClusterStateBundle();
        if (bundle == null) {
            return List.of();
        }
        return cluster.getNodeInfo().stream().
                filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
                map(NodeInfo::getNode).
                collect(Collectors.toList());
    }

    /** Renders up to 'limit' elements; any overflow is summarized as "(... and N more)". */
    private static <E> String stringifyListWithLimits(List<E> list, int limit) {
        if (list.size() > limit) {
            var sub = list.subList(0, limit);
            return String.format("%s (... and %d more)",
                    sub.stream().map(E::toString).collect(Collectors.joining(", ")),
                    list.size() - limit);
        } else {
            return list.stream().map(E::toString).collect(Collectors.joining(", "));
        }
    }

    private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
        var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
        if (nodes.isEmpty()) {
            return "";
        }
        return String.format("the following nodes have not converged to at least version %d: %s",
                taskConvergeVersion,
                stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
    }

    /** Completes (or fails, past deadline) queued tasks whose required state version has been ACKed by the cluster. */
    private boolean completeSatisfiedVersionDependentTasks() {
        int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
        long queueSizeBefore = taskCompletionQueue.size();
        final long now = timer.getCurrentTimeInMillis();
        // Stops at the first entry that is neither satisfied nor past its deadline.
        while (!taskCompletionQueue.isEmpty()) {
            VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
            if (publishedVersion >= taskCompletion.getMinimumVersion()) {
                log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                        taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
                var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
                log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                        taskCompletion.getTask().getClass().getName(), details));
                taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                        RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else {
                break;
            }
        }
        return (taskCompletionQueue.size() != queueSizeBefore);
    }

    /**
     * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
     * up or down even when the whole cluster is down. The regular, published cluster state is not
     * normally updated to reflect node events when the cluster is down.
     */
    ClusterState consolidatedClusterState() {
        final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
        if (publishedState.getClusterState() == State.UP) {
            return publishedState;
        }
        // Cluster is not UP: use the latest candidate state (which tracks node events), keeping the published version.
        final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
        current.setVersion(publishedState.getVersion());
        return current;
    }

    /*
     System test observations:
     - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
     - long time before content node state convergence (though this seems to be the case for legacy impl as well)
     */
    private boolean resyncLocallyCachedState() throws InterruptedException {
        boolean didWork = false;
        // Non-masters periodically re-load wanted states and start timestamps from the database.
        if ( ! isMaster && cycleCount % 100 == 0) {
            didWork = database.loadWantedStates(databaseContext);
            didWork |= database.loadStartTimestamps(cluster);
        }
        didWork |= nodeLookup.updateCluster(cluster, this);
        didWork |= stateGatherer.sendMessages(cluster, communicator, this);
        didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this);
        didWork |= recomputeClusterStateIfRequired();

        if ( ! isStateGatherer) {
            if ( ! isMaster) {
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                stateChangeHandler.setStateChangedFlag();
            }
        }
        isStateGatherer = true;
        return didWork;
    }

    private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
        systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
    }

    private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
        return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
    }

    /** Recomputes the candidate cluster state and, when warranted, promotes and publishes it as a new version. */
    private boolean recomputeClusterStateIfRequired() {
        boolean stateWasChanged = false;
        if (mustRecomputeCandidateClusterState()) {
            stateChangeHandler.unsetStateChangedFlag();
            final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
            final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                    .bucketSpaces(configuredBucketSpaces)
                    .stateDeriver(createBucketSpaceStateDeriver())
                    .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                    .feedBlock(createResourceExhaustionCalculator()
                            .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
                    .deriveAndBuild();
            stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
            invokeCandidateStateListeners(candidateBundle);

            final long timeNowMs = timer.getCurrentTimeInMillis();
            if
(hasPassedFirstStateBroadcastTimePoint(timeNowMs)
                    && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                        || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) {
                final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();

                stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
                emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
                handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
                stateWasChanged = true;
            }
        }
        /*
         * This works transparently for tasks that end up changing the current cluster state (i.e.
         * requiring a new state to be published) and for those whose changes are no-ops (because
         * the changes they request are already part of the current state). In the former case the
         * tasks will depend on the version that was generated based upon them. In the latter case
         * the tasks will depend on the version that is already published (or in the process of
         * being published).
         */
        scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
        return stateWasChanged;
    }

    /** Chooses how per-bucket-space states are derived from the baseline state. */
    private ClusterStateDeriver createBucketSpaceStateDeriver() {
        if (options.clusterHasGlobalDocumentTypes) {
            return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                    createDefaultSpaceMaintenanceTransitionConstraint());
        } else {
            return createIdentityClonedBucketSpaceStateDeriver();
        }
    }

    private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
        return new ResourceExhaustionCalculator(
                options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
                stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
                options.clusterFeedBlockNoiseLevel);
    }

    private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
        return (state, space) -> state.clone();
    }

    private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
        AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
                .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
        return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
    }

    /**
     * Move tasks that are dependent on the most recently generated state being published into
     * a completion queue with a dependency on the provided version argument. Once that version
     * has been ACKed by all distributors in the system, those tasks will be marked as completed.
     */
    private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
        final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
        for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
            log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d",
                    task.getClass().getName(), completeAtVersion));
            taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
        }
        tasksPendingStateRecompute.clear();
    }

    private AnnotatedClusterState computeCurrentAnnotatedState() {
        ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
        params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
              .cluster(cluster)
              .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
        return ClusterStateGenerator.generatedStateFrom(params);
    }

    /** Logs events for all differences between the two state bundles, plus the applied-state events. */
    private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                                final ClusterStateBundle toState,
                                                final long timeNowMs) {
        final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
                EventDiffCalculator.params()
                        .cluster(cluster)
                        .fromState(fromState)
                        .toState(toState)
                        .currentTimeMs(timeNowMs)
                        .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
        for (Event event : deltaEvents) {
            eventLog.add(event, isMaster);
        }
        emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
    }

    private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState),
                timeNowMs), isMaster);
        if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
            eventLog.add(new ClusterEvent(
                    ClusterEvent.Type.SYSTEMSTATE,
                    "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(),
                    timeNowMs), isMaster);
        }
    }

    private boolean atFirstClusterStateSendTimeEdge() {
        // Only relevant for the master, and only until it has broadcast its first state bundle.
        if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
            return false;
        }
        return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
    }

    private boolean mustRecomputeCandidateClusterState() {
        return stateChangeHandler.stateMayHaveChanged()
                || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
                || atFirstClusterStateSendTimeEdge();
    }

    /** Performs work on the edge where this node's master-election status changes. (Method continues beyond this chunk.) */
    private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
        boolean didWork = false;
        if (masterElectionHandler.isMaster()) {
            if ( ! isMaster) {
                // Just became master: restore persisted state (version, bundle, wanted states,
                // start timestamps) from ZooKeeper before taking over.
                stateChangeHandler.setStateChangedFlag();
                systemStateBroadcaster.resetBroadcastedClusterStateBundle();
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
                database.loadStartTimestamps(cluster);
                database.loadWantedStates(databaseContext);
                log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
                stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master.
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; isMaster = true; inMasterMoratorium = true; log.log(Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); didWork = true; } if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { dropLeadershipState(); } metricUpdater.updateMasterState(isMaster); return didWork; } private void dropLeadershipState() { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; inMasterMoratorium = false; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(Level.FINE, () -> "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(Level.SEVERE, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { prepareShutdownEdge(); } } private void prepareShutdownEdge() { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { 
return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. 
*/ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getClusterStateVersionBundleAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else ++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
Yes, I gave up on a very few of these :)
/**
 * Handles an HTTP status page request by routing it to the matching registered handler.
 * Must be invoked from the controller thread (enforced by verifyInControllerThread()).
 *
 * If no handler matches, or a handler throws, an HTML error page is synthesized instead:
 * 404 for an unknown path, 500 for any other exception. For 500s the stack trace is kept
 * out of the visible message and only written into the hidden HTML footer.
 *
 * NOTE(review): httpRequest.getRequest() is embedded into the HTML comment without
 * escaping — presumably requests come from a trusted/internal network; confirm.
 *
 * @param httpRequest the incoming status page request
 * @return the handler's response, or a synthesized HTML error page
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    String hiddenMessage = "";
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
    } catch (Exception e) {
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
        // Lazy supplier (matches the file's log.log(Level.FINE, () -> ...) convention):
        // avoids concatenating the stack trace into the message when FINE is disabled.
        final String stackTrace = hiddenMessage; // effectively-final copy for the lambda
        log.log(Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + stackTrace);
    }
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}
log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
/**
 * Serves a status page request. Routing is delegated to the registered handlers;
 * when routing fails an HTML error page is built here instead (404 for an unknown
 * path, 500 for an unexpected exception). Stack traces are only placed in the
 * hidden footer, never in the visible message. Controller-thread only.
 *
 * @param httpRequest the incoming status page request
 * @return the handler's response, or a synthesized HTML error page
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode code;
    String visibleMessage;
    String hiddenMessage = "";
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        code = StatusPageResponse.ResponseCode.NOT_FOUND;
        visibleMessage = e.getMessage();
    } catch (Exception e) {
        code = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        visibleMessage = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
        // Guarded so the message (with stack trace) is only built when FINE is enabled.
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
        }
    }
    // Routing failed; build the error page.
    TimeZone utc = TimeZone.getTimeZone("UTC");
    long now = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    response.setContentType("text/html");
    response.setResponseCode(code);
    StringBuilder html = new StringBuilder();
    html.append("<!-- Answer to request ").append(httpRequest.getRequest()).append(" -->\n");
    html.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(now, utc)).append("</p>");
    response.writeHtmlHeader(html, visibleMessage);
    response.writeHtmlFooter(html, hiddenMessage);
    response.writeContent(html.toString());
    return response;
}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final int configuredIndex; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private 
boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.configuredIndex = options.fleetControllerIndex; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); 
this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, 
stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return isMaster; } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public ClusterStateBundle getClusterStateBundle() { synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. */ public void addSystemStateListener(SystemStateListener listener) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) { throw new NullPointerException("Cluster state should never be null at this point"); } listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged(); if (convergedState != null) { listener.handleStateConvergedInCluster(convergedState); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return 
stateVersionTracker.getVersionedClusterState(); } } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(Level.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(Level.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(databaseContext); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. 
Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) { if (!options.clusterFeedBlockEnabled) { return; } var calc = createResourceExhaustionCalculator(); var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo); var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo); if (!previouslyExhausted.equals(nowExhausted)) { log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted)); stateChangeHandler.setStateChangedFlag(); } } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { 
verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewPublishedState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; systemStateBroadcaster.handleNewClusterStates(stateBundle); if (isMaster) { storeClusterStateMetaDataToZooKeeper(stateBundle); } } private boolean maybePublishOldMetrics() { verifyInControllerThread(); if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) { ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle(); ClusterState baselineState = stateBundle.getBaselineClusterState(); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. 
*/ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); boolean wasMaster = isMaster; masterElectionHandler.lostDatabaseConnection(); if (wasMaster) { dropLeadershipState(); metricUpdater.updateMasterState(false); } } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); var currentBundle = stateVersionTracker.getVersionedClusterStateBundle(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context); convergedStates.add(currentBundle); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() { verifyInControllerThread(); selfTerminateIfConfiguredNodeIndexHasChanged(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } configuredBucketSpaces = Collections.unmodifiableSet( Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()) .collect(Collectors.toSet())); stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio); communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty()); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ 
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } private void selfTerminateIfConfiguredNodeIndexHasChanged() { if (options.fleetControllerIndex != configuredIndex) { log.warning(String.format("Got new configuration where CC index has changed from %d to %d. We do not support "+ "doing this live; immediately exiting now to force new configuration", configuredIndex, options.fleetControllerIndex)); prepareShutdownEdge(); System.exit(1); } } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! 
isRunning()) { return; } if (isMaster) { didWork |= broadcastClusterStateToEligibleNodes(); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); didWork |= maybePublishOldMetrics(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(Level.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(Level.SEVERE, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if 
(statusPageServer != null) {
            StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
            if (statusRequest != null) {
                statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
                return true;
            }
        }
        return false;
    }

    /**
     * Broadcasts the current cluster state bundle (and any pending state activations) to the
     * cluster, honoring the initial "master moratorium" during which broadcasting is withheld
     * until all nodes have reported in or a timeout passes.
     *
     * @return true if anything was sent
     */
    private boolean broadcastClusterStateToEligibleNodes() {
        // A pending ZooKeeper store means our persisted state version is not yet confirmed;
        // publishing now could expose a state that fails to survive a controller failover.
        if (database.hasPendingClusterStateMetaDataStore()) {
            log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
            return false;
        }
        boolean sentAny = false;
        long currentTime = timer.getCurrentTimeInMillis();
        if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
                && currentTime >= nextStateSendTime) {
            if (inMasterMoratorium) {
                log.info(currentTime < firstAllowedStateBroadcast
                        ? "Master moratorium complete: all nodes have reported in"
                        : "Master moratorium complete: timed out waiting for all nodes to report in");
                firstAllowedStateBroadcast = currentTime;
                inMasterMoratorium = false;
            }
            sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
                    databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
            if (sentAny) {
                // Rate limit: no new state bundle until the configured minimum interval has passed.
                nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
            }
        }
        sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
        return sentAny;
    }

    /** Delivers newly published and newly converged state bundles to all registered listeners. */
    private void propagateNewStatesToListeners() {
        if ( ! newStates.isEmpty()) {
            synchronized (systemStateListeners) {
                for (ClusterStateBundle stateBundle : newStates) {
                    for (SystemStateListener listener : systemStateListeners) {
                        listener.handleNewPublishedState(stateBundle);
                    }
                }
                newStates.clear();
            }
        }
        if ( !
convergedStates.isEmpty()) {
            synchronized (systemStateListeners) {
                for (ClusterStateBundle stateBundle : convergedStates) {
                    for (SystemStateListener listener : systemStateListeners) {
                        listener.handleStateConvergedInCluster(stateBundle);
                    }
                }
                convergedStates.clear();
            }
        }
    }

    /**
     * Dequeues and executes at most one remote (externally scheduled) task. Tasks that must
     * wait for a new cluster state version to be acked are parked until the next state
     * recomputation instead of being completed immediately.
     *
     * @return true if a task was processed
     */
    private boolean processNextQueuedRemoteTask() {
        metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
        RemoteClusterControllerTask task = remoteTasks.poll();
        if (task == null) {
            return false;
        }
        final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
        log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
        task.doRemoteFleetControllerTask(context);
        if (taskMayBeCompletedImmediately(task)) {
            log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
            task.notifyCompleted();
        } else {
            log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
            tasksPendingStateRecompute.add(task);
        }
        return true;
    }

    // A task may complete right away unless it depends on a version ack -- and only the master
    // publishes state versions, so failed tasks and non-masters never need to wait.
    private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
        return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
    }

    /** Builds the context handed to remote tasks: current cluster/state views plus callback hooks. */
    private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
        final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
        context.cluster = cluster;
        context.currentConsolidatedState = consolidatedClusterState();
        context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
        context.masterInfo = new MasterInterface() {
            @Override public boolean isMaster() { return isMaster; }
            @Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
            @Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
        };
        context.nodeStateOrHostInfoChangeHandler = this;
        context.nodeAddedOrRemovedListener = this;
        return context;
    }

    private static long
effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
        // With two-phase (deferred) activation a version only counts once its activation is acked;
        // otherwise the bundle ack alone is sufficient.
        return bundle.deferredActivation()
                ? nodeInfo.getClusterStateVersionActivationAcked()
                : nodeInfo.getClusterStateVersionBundleAcknowledged();
    }

    /** Lists the nodes that have not yet acked (and, where applicable, activated) the given version. */
    private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
        var bundle = systemStateBroadcaster.getClusterStateBundle();
        if (bundle == null) {
            return List.of();
        }
        return cluster.getNodeInfo().stream().
                filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
                map(NodeInfo::getNode).
                collect(Collectors.toList());
    }

    // Joins up to `limit` elements with ", "; longer lists get a "(... and N more)" suffix so
    // log and error messages stay bounded in size.
    private static <E> String stringifyListWithLimits(List<E> list, int limit) {
        if (list.size() > limit) {
            var sub = list.subList(0, limit);
            return String.format("%s (... and %d more)",
                    sub.stream().map(E::toString).collect(Collectors.joining(", ")),
                    list.size() - limit);
        } else {
            return list.stream().map(E::toString).collect(Collectors.joining(", "));
        }
    }

    /** Builds a human-readable summary of nodes lagging behind the given version, or "" if none. */
    private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
        var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
        if (nodes.isEmpty()) {
            return "";
        }
        return String.format("the following nodes have not converged to at least version %d: %s",
                taskConvergeVersion,
                stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
    }

    /**
     * Completes (or fails, on deadline) queued tasks whose required cluster state version has
     * been acked by the cluster. The queue is processed in order and iteration stops at the
     * first task that can neither complete nor time out yet.
     *
     * @return true if any task left the queue
     */
    private boolean completeSatisfiedVersionDependentTasks() {
        int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
        long queueSizeBefore = taskCompletionQueue.size();
        final long now = timer.getCurrentTimeInMillis();
        while (!taskCompletionQueue.isEmpty()) {
            VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
            if (publishedVersion >= taskCompletion.getMinimumVersion()) {
                log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                        taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
                taskCompletion.getTask().notifyCompleted();
taskCompletionQueue.remove();
            } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
                // Waited too long for convergence: fail the task with a diagnostic listing the laggards.
                var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
                log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                        taskCompletion.getTask().getClass().getName(), details));
                taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                        RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else {
                break;
            }
        }
        return (taskCompletionQueue.size() != queueSizeBefore);
    }

    /**
     * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
     * up or down even when the whole cluster is down. The regular, published cluster state is not
     * normally updated to reflect node events when the cluster is down.
     */
    ClusterState consolidatedClusterState() {
        final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
        if (publishedState.getClusterState() == State.UP) {
            return publishedState;
        }
        // Cluster is down: overlay the latest candidate's per-node info onto the published version.
        final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
        current.setVersion(publishedState.getVersion());
        return current;
    }

    /*
      System test observations:
        - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
        - long time before content node state convergence (though this seems to be the case for legacy impl as well)
     */

    private boolean resyncLocallyCachedState() throws InterruptedException {
        boolean didWork = false;
        if ( !
isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); stateChangeHandler.setStateChangedFlag(); } } isStateGatherer = true; return didWork; } private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) { systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle)); } private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) { return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported(); } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deferredActivation(options.enableTwoPhaseClusterStateActivation) .feedBlock(createResourceExhaustionCalculator() .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo())) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); invokeCandidateStateListeners(candidateBundle); final long timeNowMs = timer.getCurrentTimeInMillis(); if 
(hasPassedFirstStateBroadcastTimePoint(timeNowMs) && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) { final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). 
*/ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } private ClusterStateDeriver createBucketSpaceStateDeriver() { if (options.clusterHasGlobalDocumentTypes) { return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(), createDefaultSpaceMaintenanceTransitionConstraint()); } else { return createIdentityClonedBucketSpaceStateDeriver(); } } private ResourceExhaustionCalculator createResourceExhaustionCalculator() { return new ResourceExhaustionCalculator( options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit, stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(), options.clusterFeedBlockNoiseLevel); } private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() { return (state, space) -> state.clone(); } private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() { AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle() .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState()); return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState()); } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. 
*/ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis(); for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState, final ClusterStateBundle toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs) .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs())); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean atFirstClusterStateSendTimeEdge() { if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) { return false; } return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis()); } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged() || atFirstClusterStateSendTimeEdge(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { stateChangeHandler.setStateChangedFlag(); systemStateBroadcaster.resetBroadcastedClusterStateBundle(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); ClusterStateBundle previousBundle = database.getLatestClusterStateBundle(); database.loadStartTimestamps(cluster); database.loadWantedStates(databaseContext); log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle)); stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; isMaster = true; inMasterMoratorium = true; log.log(Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); didWork = true; } if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { dropLeadershipState(); } metricUpdater.updateMasterState(isMaster); return didWork; } private void dropLeadershipState() { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; inMasterMoratorium = false; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(Level.FINE, () -> "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(Level.SEVERE, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { prepareShutdownEdge(); } } private void prepareShutdownEdge() { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { 
return FleetController.this; }
        @Override
        public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
    };

    /**
     * Blocks until at least one full processing cycle has completed after this call.
     * Intended for tests that need to observe the effect of a queued change.
     *
     * @throws IllegalStateException on timeout or if the controller has stopped
     */
    public void waitForCompleteCycle(long timeoutMS) {
        long endTime = System.currentTimeMillis() + timeoutMS;
        synchronized (monitor) {
            // If a cycle is currently in progress we must see it finish plus one more full cycle.
            long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
            waitingForCycle = true;
            try{
                while (cycleCount < wantedCycle) {
                    if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                    if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                    try{
                        monitor.wait(100);
                    } catch (InterruptedException e) {} // NOTE(review): interrupt deliberately swallowed while polling -- confirm no caller relies on interruptibility
                }
            } finally {
                waitingForCycle = false;
            }
        }
    }

    /**
     * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
     * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
     * live performance to remove a non-problem.
*/
    public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
        long maxTime = System.currentTimeMillis() + timeout;
        synchronized (monitor) {
            while (true) {
                // Count nodes that have acked a bundle at or above the requested version.
                int ackedNodes = 0;
                for (NodeInfo node : cluster.getNodeInfo()) {
                    if (node.getClusterStateVersionBundleAcknowledged() >= version) {
                        ++ackedNodes;
                    }
                }
                if (ackedNodes >= nodeCount) {
                    log.log(Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                    return;
                }
                long remainingTime = maxTime - System.currentTimeMillis();
                if (remainingTime <= 0) {
                    throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
                }
                monitor.wait(10);
            }
        }
    }

    /** Test helper: waits until the given numbers of distributors and storage nodes have current slobrok registrations. */
    public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
        long maxTime = System.currentTimeMillis() + timeoutMillis;
        synchronized (monitor) {
            while (true) {
                int distCount = 0, storCount = 0;
                for (NodeInfo info : cluster.getNodeInfo()) {
                    if (!info.isRpcAddressOutdated()) {
                        if (info.isDistributor()) ++distCount;
                        else ++storCount;
                    }
                }
                if (distCount == distNodeCount && storCount == storNodeCount) return;
                long remainingTime = maxTime - System.currentTimeMillis();
                if (remainingTime <= 0) {
                    throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                            + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount
                            + " distributors and " + storCount + " storage nodes)");
                }
                monitor.wait(10);
            }
        }
    }

    /** Returns true while the ZooKeeper-backed database connection is open. */
    public boolean hasZookeeperConnection() { return !database.isClosed(); }

    // Test methods / accessors below.
    public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

    public ContentCluster getCluster() { return cluster; }

    public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

    public EventLog getEventLog() { return eventLog; }

}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final int configuredIndex; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private 
boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.configuredIndex = options.fleetControllerIndex; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); 
this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, 
stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return isMaster; } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public ClusterStateBundle getClusterStateBundle() { synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. */ public void addSystemStateListener(SystemStateListener listener) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) { throw new NullPointerException("Cluster state should never be null at this point"); } listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged(); if (convergedState != null) { listener.handleStateConvergedInCluster(convergedState); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return 
stateVersionTracker.getVersionedClusterState(); } } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(Level.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(Level.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(databaseContext); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. 
Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) { if (!options.clusterFeedBlockEnabled) { return; } var calc = createResourceExhaustionCalculator(); var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo); var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo); if (!previouslyExhausted.equals(nowExhausted)) { log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted)); stateChangeHandler.setStateChangedFlag(); } } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { 
verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewPublishedState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; systemStateBroadcaster.handleNewClusterStates(stateBundle); if (isMaster) { storeClusterStateMetaDataToZooKeeper(stateBundle); } } private boolean maybePublishOldMetrics() { verifyInControllerThread(); if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) { ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle(); ClusterState baselineState = stateBundle.getBaselineClusterState(); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. 
*/ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); boolean wasMaster = isMaster; masterElectionHandler.lostDatabaseConnection(); if (wasMaster) { dropLeadershipState(); metricUpdater.updateMasterState(false); } } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); var currentBundle = stateVersionTracker.getVersionedClusterStateBundle(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context); convergedStates.add(currentBundle); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() { verifyInControllerThread(); selfTerminateIfConfiguredNodeIndexHasChanged(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } configuredBucketSpaces = Collections.unmodifiableSet( Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()) .collect(Collectors.toSet())); stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio); communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty()); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ 
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } private void selfTerminateIfConfiguredNodeIndexHasChanged() { if (options.fleetControllerIndex != configuredIndex) { log.warning(String.format("Got new configuration where CC index has changed from %d to %d. We do not support "+ "doing this live; immediately exiting now to force new configuration", configuredIndex, options.fleetControllerIndex)); prepareShutdownEdge(); System.exit(1); } } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! 
isRunning()) { return; } if (isMaster) { didWork |= broadcastClusterStateToEligibleNodes(); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); didWork |= maybePublishOldMetrics(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(Level.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(Level.SEVERE, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if 
(statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() { if (database.hasPendingClusterStateMetaDataStore()) { log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores"); return false; } boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (inMasterMoratorium) { log.info(currentTime < firstAllowedStateBroadcast ? "Master moratorium complete: all nodes have reported in" : "Master moratorium complete: timed out waiting for all nodes to report in"); firstAllowedStateBroadcast = currentTime; inMasterMoratorium = false; } sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired( databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf()); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator); return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : newStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleNewPublishedState(stateBundle); } } newStates.clear(); } } if ( ! 
convergedStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : convergedStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleStateConvergedInCluster(stateBundle); } } convergedStates.clear(); } } } private boolean processNextQueuedRemoteTask() { metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size()); RemoteClusterControllerTask task = remoteTasks.poll(); if (task == null) { return false; } final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(context); if (taskMayBeCompletedImmediately(task)) { log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); tasksPendingStateRecompute.add(task); } return true; } private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) { return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster); } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentConsolidatedState = consolidatedClusterState(); context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.masterInfo = new MasterInterface() { @Override public boolean isMaster() { return isMaster; } @Override public Integer getMaster() { return masterElectionHandler.getMaster(); } @Override public boolean inMasterMoratorium() { return inMasterMoratorium; } }; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private static long 
effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) { return bundle.deferredActivation() ? nodeInfo.getClusterStateVersionActivationAcked() : nodeInfo.getClusterStateVersionBundleAcknowledged(); } private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) { var bundle = systemStateBroadcaster.getClusterStateBundle(); if (bundle == null) { return List.of(); } return cluster.getNodeInfo().stream(). filter(n -> effectiveActivatedStateVersion(n, bundle) < version). map(NodeInfo::getNode). collect(Collectors.toList()); } private static <E> String stringifyListWithLimits(List<E> list, int limit) { if (list.size() > limit) { var sub = list.subList(0, limit); return String.format("%s (... and %d more)", sub.stream().map(E::toString).collect(Collectors.joining(", ")), list.size() - limit); } else { return list.stream().map(E::toString).collect(Collectors.joining(", ")); } } private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) { var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion); if (nodes.isEmpty()) { return ""; } return String.format("the following nodes have not converged to at least version %d: %s", taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages)); } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); final long now = timer.getCurrentTimeInMillis(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); 
taskCompletionQueue.remove(); } else if (taskCompletion.getDeadlineTimePointMs() <= now) { var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion()); log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)", taskCompletion.getTask().getClass().getName(), details)); taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! 
isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); stateChangeHandler.setStateChangedFlag(); } } isStateGatherer = true; return didWork; } private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) { systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle)); } private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) { return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported(); } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deferredActivation(options.enableTwoPhaseClusterStateActivation) .feedBlock(createResourceExhaustionCalculator() .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo())) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); invokeCandidateStateListeners(candidateBundle); final long timeNowMs = timer.getCurrentTimeInMillis(); if 
(hasPassedFirstStateBroadcastTimePoint(timeNowMs) && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) { final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). 
*/ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } private ClusterStateDeriver createBucketSpaceStateDeriver() { if (options.clusterHasGlobalDocumentTypes) { return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(), createDefaultSpaceMaintenanceTransitionConstraint()); } else { return createIdentityClonedBucketSpaceStateDeriver(); } } private ResourceExhaustionCalculator createResourceExhaustionCalculator() { return new ResourceExhaustionCalculator( options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit, stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(), options.clusterFeedBlockNoiseLevel); } private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() { return (state, space) -> state.clone(); } private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() { AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle() .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState()); return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState()); } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. 
*/ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis(); for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState, final ClusterStateBundle toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs) .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs())); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean atFirstClusterStateSendTimeEdge() { if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) { return false; } return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis()); } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged() || atFirstClusterStateSendTimeEdge(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { stateChangeHandler.setStateChangedFlag(); systemStateBroadcaster.resetBroadcastedClusterStateBundle(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); ClusterStateBundle previousBundle = database.getLatestClusterStateBundle(); database.loadStartTimestamps(cluster); database.loadWantedStates(databaseContext); log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle)); stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; isMaster = true; inMasterMoratorium = true; log.log(Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); didWork = true; } if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { dropLeadershipState(); } metricUpdater.updateMasterState(isMaster); return didWork; } private void dropLeadershipState() { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; inMasterMoratorium = false; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(Level.FINE, () -> "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(Level.SEVERE, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { prepareShutdownEdge(); } } private void prepareShutdownEdge() { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { 
return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. 
*/ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getClusterStateVersionBundleAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else ++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
Could have used the if-statement form here: guarding the FINE-level log call with log.isLoggable(Level.FINE) avoids building the message string when FINE logging is disabled.
/**
 * Resolves and invokes the status page handler for the given HTTP request and
 * returns the handler's response. If no handler matches the request path, or the
 * handler throws, an HTML error page is built instead: 404 for unknown paths,
 * 500 for unexpected exceptions (whose stack trace is embedded as the page's
 * hidden message).
 *
 * Must be called from the controller thread.
 *
 * @param httpRequest the incoming status page HTTP request
 * @return the handler's response, or a generated HTML error page
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    String hiddenMessage = "";
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
    } catch (Exception e) {
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
        // Guard so the message string (request + full stack trace) is only
        // concatenated when FINE logging is actually enabled.
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
        }
    }
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}
log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
/**
 * Serves a status page request by routing it to the matching handler.
 * Unknown paths produce a 404 error page; an exception from a handler
 * produces a 500 error page carrying the stack trace as hidden text.
 * The FINE-level trace log is guarded by isLoggable so the message is
 * only built when that level is enabled.
 *
 * Must be called from the controller thread.
 *
 * @param httpRequest the incoming status page HTTP request
 * @return the handler's response, or a generated HTML error page
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode errorCode;
    String headerText;
    String hiddenText = "";
    try {
        StatusPageServer.RequestHandler requestHandler = statusRequestRouter.resolveHandler(httpRequest);
        if (requestHandler != null) {
            return requestHandler.handle(httpRequest);
        }
        throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
    } catch (FileNotFoundException e) {
        errorCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        headerText = e.getMessage();
    } catch (Exception e) {
        errorCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        headerText = "Internal Server Error";
        hiddenText = ExceptionUtils.getStackTraceAsString(e);
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenText);
        }
    }
    // Build a minimal HTML error page stamped with the current UTC time.
    TimeZone utc = TimeZone.getTimeZone("UTC");
    long now = timer.getCurrentTimeInMillis();
    StatusPageResponse errorPage = new StatusPageResponse();
    errorPage.setContentType("text/html");
    errorPage.setResponseCode(errorCode);
    StringBuilder html = new StringBuilder();
    html.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    html.append("<p>UTC time when creating this page: ")
        .append(RealTimer.printDateNoMilliSeconds(now, utc))
        .append("</p>");
    errorPage.writeHtmlHeader(html, headerText);
    errorPage.writeHtmlFooter(html, hiddenText);
    errorPage.writeContent(html.toString());
    return errorPage;
}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final int configuredIndex; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private 
boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.configuredIndex = options.fleetControllerIndex; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); 
this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, 
stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return isMaster; } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public ClusterStateBundle getClusterStateBundle() { synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. */ public void addSystemStateListener(SystemStateListener listener) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) { throw new NullPointerException("Cluster state should never be null at this point"); } listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged(); if (convergedState != null) { listener.handleStateConvergedInCluster(convergedState); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return 
stateVersionTracker.getVersionedClusterState(); } } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(Level.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(Level.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(databaseContext); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. 
Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) { if (!options.clusterFeedBlockEnabled) { return; } var calc = createResourceExhaustionCalculator(); var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo); var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo); if (!previouslyExhausted.equals(nowExhausted)) { log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted)); stateChangeHandler.setStateChangedFlag(); } } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { 
verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewPublishedState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; systemStateBroadcaster.handleNewClusterStates(stateBundle); if (isMaster) { storeClusterStateMetaDataToZooKeeper(stateBundle); } } private boolean maybePublishOldMetrics() { verifyInControllerThread(); if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) { ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle(); ClusterState baselineState = stateBundle.getBaselineClusterState(); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. 
*/ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); boolean wasMaster = isMaster; masterElectionHandler.lostDatabaseConnection(); if (wasMaster) { dropLeadershipState(); metricUpdater.updateMasterState(false); } } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); var currentBundle = stateVersionTracker.getVersionedClusterStateBundle(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context); convergedStates.add(currentBundle); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() { verifyInControllerThread(); selfTerminateIfConfiguredNodeIndexHasChanged(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } configuredBucketSpaces = Collections.unmodifiableSet( Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()) .collect(Collectors.toSet())); stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio); communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty()); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ 
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } private void selfTerminateIfConfiguredNodeIndexHasChanged() { if (options.fleetControllerIndex != configuredIndex) { log.warning(String.format("Got new configuration where CC index has changed from %d to %d. We do not support "+ "doing this live; immediately exiting now to force new configuration", configuredIndex, options.fleetControllerIndex)); prepareShutdownEdge(); System.exit(1); } } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! 
isRunning()) { return; } if (isMaster) { didWork |= broadcastClusterStateToEligibleNodes(); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); didWork |= maybePublishOldMetrics(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(Level.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(Level.SEVERE, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if 
(statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() { if (database.hasPendingClusterStateMetaDataStore()) { log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores"); return false; } boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (inMasterMoratorium) { log.info(currentTime < firstAllowedStateBroadcast ? "Master moratorium complete: all nodes have reported in" : "Master moratorium complete: timed out waiting for all nodes to report in"); firstAllowedStateBroadcast = currentTime; inMasterMoratorium = false; } sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired( databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf()); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator); return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : newStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleNewPublishedState(stateBundle); } } newStates.clear(); } } if ( ! 
convergedStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : convergedStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleStateConvergedInCluster(stateBundle); } } convergedStates.clear(); } } } private boolean processNextQueuedRemoteTask() { metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size()); RemoteClusterControllerTask task = remoteTasks.poll(); if (task == null) { return false; } final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(context); if (taskMayBeCompletedImmediately(task)) { log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); tasksPendingStateRecompute.add(task); } return true; } private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) { return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster); } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentConsolidatedState = consolidatedClusterState(); context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.masterInfo = new MasterInterface() { @Override public boolean isMaster() { return isMaster; } @Override public Integer getMaster() { return masterElectionHandler.getMaster(); } @Override public boolean inMasterMoratorium() { return inMasterMoratorium; } }; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private static long 
effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) { return bundle.deferredActivation() ? nodeInfo.getClusterStateVersionActivationAcked() : nodeInfo.getClusterStateVersionBundleAcknowledged(); } private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) { var bundle = systemStateBroadcaster.getClusterStateBundle(); if (bundle == null) { return List.of(); } return cluster.getNodeInfo().stream(). filter(n -> effectiveActivatedStateVersion(n, bundle) < version). map(NodeInfo::getNode). collect(Collectors.toList()); } private static <E> String stringifyListWithLimits(List<E> list, int limit) { if (list.size() > limit) { var sub = list.subList(0, limit); return String.format("%s (... and %d more)", sub.stream().map(E::toString).collect(Collectors.joining(", ")), list.size() - limit); } else { return list.stream().map(E::toString).collect(Collectors.joining(", ")); } } private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) { var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion); if (nodes.isEmpty()) { return ""; } return String.format("the following nodes have not converged to at least version %d: %s", taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages)); } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); final long now = timer.getCurrentTimeInMillis(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); 
taskCompletionQueue.remove(); } else if (taskCompletion.getDeadlineTimePointMs() <= now) { var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion()); log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)", taskCompletion.getTask().getClass().getName(), details)); taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! 
isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); stateChangeHandler.setStateChangedFlag(); } } isStateGatherer = true; return didWork; } private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) { systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle)); } private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) { return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported(); } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deferredActivation(options.enableTwoPhaseClusterStateActivation) .feedBlock(createResourceExhaustionCalculator() .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo())) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); invokeCandidateStateListeners(candidateBundle); final long timeNowMs = timer.getCurrentTimeInMillis(); if 
(hasPassedFirstStateBroadcastTimePoint(timeNowMs) && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) { final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). 
*/ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } private ClusterStateDeriver createBucketSpaceStateDeriver() { if (options.clusterHasGlobalDocumentTypes) { return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(), createDefaultSpaceMaintenanceTransitionConstraint()); } else { return createIdentityClonedBucketSpaceStateDeriver(); } } private ResourceExhaustionCalculator createResourceExhaustionCalculator() { return new ResourceExhaustionCalculator( options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit, stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(), options.clusterFeedBlockNoiseLevel); } private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() { return (state, space) -> state.clone(); } private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() { AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle() .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState()); return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState()); } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. 
*/
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    // All tasks queued in this call share one absolute deadline: now + configured max wait.
    final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d",
                task.getClass().getName(), completeAtVersion));
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
    }
    tasksPendingStateRecompute.clear();
}

/** Generates a fresh candidate cluster state from current options, node info and observed distribution bits. */
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    // Note: "currentTimeInMilllis" (sic) is the actual API name of the params builder.
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
            .cluster(cluster)
            .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}

/** Computes the event delta between two published bundles and records the events in the event log. */
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
    for (Event event : deltaEvents) {
        eventLog.add(event, isMaster);
    }
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}

/** Logs a "new cluster state version" event, plus a separate event if the distribution bit count changed. */
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
                    fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() +
                        " to " + toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}

/** True on the edge where this master is about to send its very first cluster state broadcast. */
private boolean atFirstClusterStateSendTimeEdge() {
    // Only a master that has not yet broadcast anything can be at this edge.
    if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
        return false;
    }
    return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}

private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged()
            || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
            || atFirstClusterStateSendTimeEdge();
}

/**
 * Handles transitions into and out of being master. On the not-master -> master edge, persisted
 * state (latest version, state bundle, start timestamps, wanted states) is restored from ZooKeeper
 * before any new state may be generated, and the first broadcast is deferred by a moratorium period.
 */
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            stateChangeHandler.setStateChangedFlag();
            systemStateBroadcaster.resetBroadcastedClusterStateBundle();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
            database.loadStartTimestamps(cluster);
            database.loadWantedStates(databaseContext);
            log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
            stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
                    "This node just became fleetcontroller master. Bumped version to " +
                            stateVersionTracker.getCurrentVersion() + " to be in line.",
                    timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            // Defer the first broadcast so nodes have a chance to report in before a state is published.
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            isMaster = true;
            inMasterMoratorium = true;
            log.log(Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " +
                    options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            didWork = true;
        }
        if (wantedStateChanged) {
            database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        dropLeadershipState();
    }
    metricUpdater.updateMasterState(isMaster);
    return didWork;
}

/** Clears all master-only state; deferred tasks are failed since completion can no longer be guaranteed. */
private void dropLeadershipState() {
    if (isMaster) {
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
                "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
        firstAllowedStateBroadcast = Long.MAX_VALUE;
        failAllVersionDependentTasks();
    }
    wantedStateChanged = false;
    isMaster = false;
    inMasterMoratorium = false;
}

/** Main event loop: ticks until stopped, then runs the shutdown edge. Fatal errors exit the process. */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while( isRunning() ) {
            tick();
        }
    } catch (InterruptedException e) {
        log.log(Level.FINE, () -> "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // NOTE(review): printStackTrace alongside the SEVERE log looks redundant — kept as-is.
        t.printStackTrace();
        log.log(Level.SEVERE, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running.set(false); }
        System.exit(1);
    } finally {
        prepareShutdownEdge();
    }
}

/** Marks the controller stopped, fails deferred tasks and wakes anyone waiting on the monitor. */
private void prepareShutdownEdge() {
    running.set(false);
    failAllVersionDependentTasks();
    synchronized (monitor) { monitor.notifyAll(); }
}

// Adapter exposing this controller and its cluster to the database (ZooKeeper) layer.
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
    @Override
    public ContentCluster getCluster() { return cluster; }
    @Override
    public FleetController getFleetController() { return FleetController.this; }
    @Override
    public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() {
return FleetController.this;
    }
    @Override
    public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};

/**
 * Test utility: blocks until at least one full tick cycle has completed after this call,
 * or throws IllegalStateException on timeout or if the controller is no longer running.
 */
public void waitForCompleteCycle(long timeoutMS) {
    long endTime = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        // If a cycle is currently in progress we must see it finish plus one more full cycle.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try{
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                // Timed wait re-checks the condition; interrupt is deliberately swallowed here.
                try{ monitor.wait(100); } catch (InterruptedException e) {}
            }
        } finally {
            waitingForCycle = false;
        }
    }
}

/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            int ackedNodes = 0;
            for (NodeInfo node : cluster.getNodeInfo()) {
                if (node.getClusterStateVersionBundleAcknowledged() >= version) {
                    ++ackedNodes;
                }
            }
            if (ackedNodes >= nodeCount) {
                log.log(Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10);
        }
    }
}

/** Test utility: waits until exactly the given numbers of distributors and storage nodes are visible in slobrok. */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount
                        + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}

/** True while the ZooKeeper database connection is open. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }

// Only used in tests; assumes nodeLookup is a SlobrokClient.
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

public ContentCluster getCluster() { return cluster; }

public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

public EventLog getEventLog() { return eventLog; }

}
/**
 * Single-threaded cluster controller event loop. All mutable state below is guarded by
 * {@code monitor} (which is the same object as {@code timer}) and, after startup, is only
 * mutated from the controller thread (see verifyInControllerThread()).
 */
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
                                 Runnable, RemoteClusterControllerTaskScheduler {

    private static final Logger log = Logger.getLogger(FleetController.class.getName());

    private final Timer timer;
    private final Object monitor; // global controller lock; assigned the timer instance in the constructor
    private final EventLog eventLog;
    private final NodeLookup nodeLookup;
    private final ContentCluster cluster;
    private final Communicator communicator;
    private final NodeStateGatherer stateGatherer;
    private final StateChangeHandler stateChangeHandler;
    private final SystemStateBroadcaster systemStateBroadcaster;
    private final StateVersionTracker stateVersionTracker;
    private final StatusPageServerInterface statusPageServer;
    private final RpcServer rpcServer;
    private final DatabaseHandler database;
    private final MasterElectionHandler masterElectionHandler;
    private Thread runner = null;
    private final AtomicBoolean running = new AtomicBoolean(true);
    private FleetControllerOptions options;
    private FleetControllerOptions nextOptions; // set by updateOptions(); swapped in on next tick
    private final int configuredIndex;
    private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
    private boolean processingCycle = false;
    private boolean wantedStateChanged = false;
    private long cycleCount = 0;
    private long lastMetricUpdateCycleCount = 0;
    private long nextStateSendTime = 0;
    private Long controllerThreadId = null;

    private boolean waitingForCycle = false;
    private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
    private final List<ClusterStateBundle> newStates = new ArrayList<>();
    private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
    private long configGeneration = -1;
    private long nextConfigGeneration = -1;
    private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
    private final MetricUpdater metricUpdater;

    private boolean isMaster = false;
    private boolean inMasterMoratorium = false; // master, but not yet allowed to broadcast its first state
    private boolean isStateGatherer = false;
    private long firstAllowedStateBroadcast = Long.MAX_VALUE;
    private long tickStartTime = Long.MAX_VALUE;

    private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
    // Invariant: tasks in the completion queue are always in strictly increasing version order.
    private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();

    private Set<String> configuredBucketSpaces = Collections.emptySet();

    // Snapshot accessor handed to status page handlers.
    private final RunDataExtractor dataExtractor = new RunDataExtractor() {
        @Override
        public FleetControllerOptions getOptions() { return options; }
        @Override
        public long getConfigGeneration() { return configGeneration; }
        @Override
        public ContentCluster getCluster() { return cluster; }
    };

    public FleetController(Timer timer,
                           EventLog eventLog,
                           ContentCluster cluster,
                           NodeStateGatherer nodeStateGatherer,
                           Communicator communicator,
                           StatusPageServerInterface statusPage,
                           RpcServer server,
                           NodeLookup nodeLookup,
                           DatabaseHandler database,
                           StateChangeHandler stateChangeHandler,
                           SystemStateBroadcaster systemStateBroadcaster,
                           MasterElectionHandler masterElectionHandler,
                           MetricUpdater metricUpdater,
                           FleetControllerOptions options) {
        log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
        this.configuredIndex = options.fleetControllerIndex;
        this.timer = timer;
        this.monitor = timer; // timer doubles as the global lock object
        this.eventLog = eventLog;
        this.options = options;
        this.nodeLookup = nodeLookup;
        this.cluster = cluster;
        this.communicator = communicator;
        this.database = database;
        this.stateGatherer = nodeStateGatherer;
        this.stateChangeHandler = stateChangeHandler;
        this.systemStateBroadcaster = systemStateBroadcaster;
        this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
        this.metricUpdater = metricUpdater;

        this.statusPageServer = statusPage;
        this.rpcServer = server;

        this.masterElectionHandler = masterElectionHandler;

        // Status page routes: per-node page, health state, raw cluster state, and the index page.
        this.statusRequestRouter.addHandler(
                "^/node=([a-z]+)\\.(\\d+)$",
                new LegacyNodePageRequestHandler(timer, eventLog, cluster));
        this.statusRequestRouter.addHandler(
                "^/state.*",
                new NodeHealthRequestHandler(dataExtractor));
        this.statusRequestRouter.addHandler(
                "^/clusterstate",
                new ClusterStateRequestHandler(stateVersionTracker));
        this.statusRequestRouter.addHandler(
                "^/$",
                new LegacyIndexPageRequestHandler(
                        timer, options.showLocalSystemStatesInEventLog, cluster,
                        masterElectionHandler, stateVersionTracker,
                        eventLog, timer.getCurrentTimeInMillis(), dataExtractor));

        propagateOptions();
    }

    /** Factory wiring up all real (non-test) collaborators and starting the controller thread. */
    public static FleetController create(FleetControllerOptions options,
                                         StatusPageServerInterface statusPageServer,
                                         MetricReporter metricReporter) throws Exception {
        Timer timer = new RealTimer();
        MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName);
        EventLog log = new EventLog(timer, metricUpdater);
        ContentCluster cluster = new ContentCluster(
                options.clusterName,
                options.nodes,
                options.storageDistribution);
        NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
        Communicator communicator = new RPCCommunicator(
                RPCCommunicator.createRealSupervisor(),
                timer,
                options.fleetControllerIndex,
                options.nodeStateRequestTimeoutMS,
                options.nodeStateRequestTimeoutEarliestPercentage,
                options.nodeStateRequestTimeoutLatestPercentage,
                options.nodeStateRequestRoundTripTimeMaxSeconds);
        DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
        NodeLookup lookUp = new SlobrokClient(timer);
        StateChangeHandler stateGenerator = new StateChangeHandler(timer, log);
        SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
        MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
        FleetController controller = new FleetController(
                timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator,
                stateBroadcaster, masterElectionHandler, metricUpdater, options);
        controller.start();
        return controller;
    }

    public void start() {
        runner = new Thread(this);
        runner.start();
    }

    public Object getMonitor() { return monitor; }

    public boolean isRunning() {
        return running.get();
    }

    public boolean isMaster() {
        synchronized (monitor) {
            return isMaster;
        }
    }

    public ClusterState getClusterState() {
        synchronized (monitor) {
            return systemStateBroadcaster.getClusterState();
        }
    }

    public ClusterStateBundle getClusterStateBundle() {
        synchronized (monitor) {
            return systemStateBroadcaster.getClusterStateBundle();
        }
    }

    /** Queues a remote task; it is picked up and executed by the controller thread during tick(). */
    public void schedule(RemoteClusterControllerTask task) {
        synchronized (monitor) {
            log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
            remoteTasks.add(task);
        }
    }

    /** Used for unit testing. */
    public void addSystemStateListener(SystemStateListener listener) {
        systemStateListeners.add(listener);
        // Immediately give the new listener the current state and last converged state, if any.
        com.yahoo.vdslib.state.ClusterState state = getSystemState();
        if (state == null) {
            throw new NullPointerException("Cluster state should never be null at this point");
        }
        listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
        ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
        if (convergedState != null) {
            listener.handleStateConvergedInCluster(convergedState);
        }
    }

    public FleetControllerOptions getOptions() {
        synchronized(monitor) {
            return options.clone();
        }
    }

    public NodeState getReportedNodeState(Node n) {
        synchronized(monitor) {
            NodeInfo node = cluster.getNodeInfo(n);
            if (node == null) {
                throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
            }
            return node.getReportedState();
        }
    }

    public NodeState getWantedNodeState(Node n) {
        synchronized(monitor) {
            return cluster.getNodeInfo(n).getWantedState();
        }
    }

    public com.yahoo.vdslib.state.ClusterState getSystemState() {
        synchronized(monitor) {
            return
stateVersionTracker.getVersionedClusterState();
    }
}

public int getRpcPort() { return rpcServer.getPort(); }

/** Stops the event thread (if running) and shuts down all collaborators in dependency order. */
public void shutdown() throws InterruptedException, java.io.IOException {
    if (runner != null && isRunning()) {
        log.log(Level.INFO, "Joining event thread.");
        running.set(false);
        { synchronized(monitor) { monitor.notifyAll(); } } // wake the event thread so it observes the stop flag
        runner.join();
    }
    log.log(Level.INFO, "Fleetcontroller done shutting down event thread.");
    // Claim the controller-thread id so shutdown-path calls pass verifyInControllerThread().
    controllerThreadId = Thread.currentThread().getId();
    database.shutdown(databaseContext);

    if (statusPageServer != null) {
        statusPageServer.shutdown();
    }
    if (rpcServer != null) {
        rpcServer.shutdown();
    }

    communicator.shutdown();
    nodeLookup.shutdown();
}

/** Stages new options; the controller thread applies them on its next tick via switchToNewConfig(). */
public void updateOptions(FleetControllerOptions options, long configGeneration) {
    synchronized(monitor) {
        assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
        log.log(Level.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
        nextOptions = options.clone();
        nextConfigGeneration = configGeneration;
        monitor.notifyAll();
    }
}

/** Guards controller-thread-only methods; a null id means the thread has not been determined yet. */
private void verifyInControllerThread() {
    if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
        throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
    }
}

private ClusterState latestCandidateClusterState() {
    return stateVersionTracker.getLatestCandidateState().getClusterState();
}

@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}

@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    wantedStateChanged = true; // persisted to ZooKeeper by the master in handleLeadershipEdgeTransitions()
    stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}

@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
    verifyInControllerThread();
    triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
    stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}

/** Flags a state recompute when a node's resource exhaustion set (feed block) has changed. */
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
    if (!options.clusterFeedBlockEnabled) {
        return;
    }
    var calc = createResourceExhaustionCalculator();
    // Compare exhaustion derived from the currently cached host info with that from the new host info.
    var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
    var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
    if (!previouslyExhausted.equals(nowExhausted)) {
        log.fine(() -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
                previouslyExhausted, nowExhausted));
        stateChangeHandler.setStateChangedFlag();
    }
}

@Override
public void handleNewNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewNode(node);
}

@Override
public void handleMissingNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}

@Override
public void handleNewRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewRpcAddress(node);
}

@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
    stateChangeHandler.handleReturnedRpcAddress(node);
}

@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
    verifyInControllerThread();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    newStates.add(stateBundle); // drained by propagateNewStatesToListeners() outside the monitor section
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    systemStateBroadcaster.handleNewClusterStates(stateBundle);
    // Only the master may mutate the ZooKeeper-persisted version/bundle.
    if (isMaster) {
        storeClusterStateMetaDataToZooKeeper(stateBundle);
    }
}

/** Re-publishes cluster state metrics if no new state has been published for ~300 cycles. */
private boolean maybePublishOldMetrics() {
    verifyInControllerThread();
    if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) {
        ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
        ClusterState baselineState = stateBundle.getBaselineClusterState();
        metricUpdater.updateClusterStateMetrics(cluster, baselineState,
                ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
        lastMetricUpdateCycleCount = cycleCount;
        return true;
    } else {
        return false;
    }
}

private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
    try {
        database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
        database.saveLatestClusterStateBundle(databaseContext, stateBundle);
    } catch (InterruptedException e) {
        // Rethrow as unchecked; callers treat an interrupted ZK write as fatal for this operation.
        throw new RuntimeException("ZooKeeper write interrupted", e);
    }
}

/**
 * This function gives data of the current state in master election.
 * The keys in the given map are indexes of fleet controllers.
 * The values are what fleetcontroller that fleetcontroller wants to
 * become master.
 *
 * If more than half the fleetcontrollers want a node to be master and
 * that node also wants itself as master, that node is the single master.
 * If this condition is not met, there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
    verifyInControllerThread();
    log.log(Level.FINEST, "Sending fleet data event on to master election handler");
    metricUpdater.updateMasterElectionMetrics(data);
    masterElectionHandler.handleFleetData(data);
}

/**
 * Called when we can no longer contact database.
 */
public void lostDatabaseConnection() {
    verifyInControllerThread();
    boolean wasMaster = isMaster;
    masterElectionHandler.lostDatabaseConnection();
    if (wasMaster) {
        // Immediately drop leadership; don't wait for the election handler to notice.
        dropLeadershipState();
        metricUpdater.updateMasterState(false);
    }
}

/** Fails and completes every deferred task in both pending and completion queues (leadership lost). */
private void failAllVersionDependentTasks() {
    tasksPendingStateRecompute.forEach(task -> {
        task.handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.notifyCompleted();
    });
    tasksPendingStateRecompute.clear();
    taskCompletionQueue.forEach(task -> {
        task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.getTask().notifyCompleted();
    });
    taskCompletionQueue.clear();
}

/** Called when all distributors have acked newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
    Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
    var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
    log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
    stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, context);
    convergedStates.add(currentBundle);
}

/** True if the new node set differs from the configured set in membership or retired status. */
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
    if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
    // Set equality ignores the retired flag, so compare it explicitly per node.
    for (ConfiguredNode node : newNodes) {
        if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) {
            return true;
        }
    }
    return false;
}

/** This is called when the options field has been set to a new set of options */
private void propagateOptions() {
    verifyInControllerThread();
    selfTerminateIfConfiguredNodeIndexHasChanged();

    if (changesConfiguredNodeSet(options.nodes)) {
        // Force slobrok node re-registration in case of changes to the set of configured nodes
        cluster.setSlobrokGenerationCount(0);
    }

    configuredBucketSpaces = Collections.unmodifiableSet(
            Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
                    .collect(Collectors.toSet()));
    stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);

    communicator.propagateOptions(options);

    if (nodeLookup instanceof SlobrokClient) {
        ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    }
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes);
    database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);

    stateChangeHandler.reconfigureFromOptions(options);
    stateChangeHandler.setStateChangedFlag(); // always trigger state recomputation
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty());

    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        try{
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            log.log(Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try{
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }

    long currentTime = timer.getCurrentTimeInMillis();
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
    configGeneration = nextConfigGeneration;
    nextConfigGeneration = -1;
}

/** Live CC index changes are unsupported; exit so the process restarts with the new configuration. */
private void selfTerminateIfConfiguredNodeIndexHasChanged() {
    if (options.fleetControllerIndex != configuredIndex) {
        log.warning(String.format("Got new configuration where CC index has changed from %d to %d. We do not support "+
                "doing this live; immediately exiting now to force new configuration",
                configuredIndex, options.fleetControllerIndex));
        prepareShutdownEdge();
        System.exit(1);
    }
}

/** One iteration of the controller event loop; bails out early at several points if stopped. */
public void tick() throws Exception {
    synchronized (monitor) {
        boolean didWork;
        didWork = database.doNextZooKeeperTask(databaseContext);
        didWork |= updateMasterElectionState();
        didWork |= handleLeadershipEdgeTransitions();
        stateChangeHandler.setMaster(isMaster);

        if ( ! isRunning()) { return; }
        // Process zero or more getNodeState responses that we have received.
        didWork |= stateGatherer.processResponses(this);

        if ( ! isRunning()) { return; }

        if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
            didWork |= resyncLocallyCachedState();
        } else {
            stepDownAsStateGatherer();
        }

        if ( ! isRunning()) { return; }
        didWork |= systemStateBroadcaster.processResponses();
        if ( !
isRunning()) { return; }
        if (isMaster) {
            didWork |= broadcastClusterStateToEligibleNodes();
            systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
        }

        if ( ! isRunning()) { return; }
        didWork |= processAnyPendingStatusPageRequest();
        if ( ! isRunning()) { return; }
        if (rpcServer != null) {
            didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
        }

        if ( ! isRunning()) { return; }
        didWork |= processNextQueuedRemoteTask();
        didWork |= completeSatisfiedVersionDependentTasks();
        didWork |= maybePublishOldMetrics();

        processingCycle = false;
        ++cycleCount;
        long tickStopTime = timer.getCurrentTimeInMillis();
        if (tickStopTime >= tickStartTime) {
            metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
        }
        // Idle tick: sleep on the monitor unless a test is waiting for a cycle to complete.
        if ( ! didWork && ! waitingForCycle) {
            monitor.wait(options.cycleWaitTime);
        }
        if ( ! isRunning()) { return; }
        tickStartTime = timer.getCurrentTimeInMillis();
        processingCycle = true;
        if (nextOptions != null) {
            switchToNewConfig();
        }
    }
    // Listener callbacks happen outside the monitor to avoid lock-order issues with listener code.
    if (isRunning()) {
        propagateNewStatesToListeners();
    }
}

/** Polls the master election; swallows non-interrupt failures so a flaky ZK watch doesn't kill the loop. */
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
    } catch (Exception e) {
        log.log(Level.WARNING, "Failed to watch master election: " + e.toString());
    }
    return false;
}

private void stepDownAsStateGatherer() {
    if (isStateGatherer) {
        // Remove cached data we no longer should keep in memory, so we rebuild it if we become gatherer again.
        cluster.clearStates();
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    }
    isStateGatherer = false;
}

/** Swaps in options staged by updateOptions(); a failing propagation is logged, not rethrown. */
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        log.log(Level.SEVERE, "Failed to handle new fleet controller config", e);
    }
}

private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer != null) {
        StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
        if (statusRequest != null) {
            statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
            return true;
        }
    }
    return false;
}

/**
 * Broadcasts the current state bundle and/or activations if due. Broadcasting is held back while
 * ZooKeeper stores are pending, during the master moratorium (unless all nodes reported in), and
 * by the minimum time between published states.
 */
private boolean broadcastClusterStateToEligibleNodes() {
    // If there's a pending DB store we have not yet been able to store the current state bundle to ZK
    // and must therefore _not_ allow it to be published yet.
    if (database.hasPendingClusterStateMetaDataStore()) {
        log.log(Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
        return false;
    }
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
        && currentTime >= nextStateSendTime)
    {
        if (inMasterMoratorium) {
            log.info(currentTime < firstAllowedStateBroadcast ?
                    "Master moratorium complete: all nodes have reported in" :
                    "Master moratorium complete: timed out waiting for all nodes to report in");
            firstAllowedStateBroadcast = currentTime;
            inMasterMoratorium = false;
        }
        sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
                databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
        if (sentAny) {
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
    return sentAny;
}

/** Drains the new-state and converged-state queues to all registered listeners (outside the monitor). */
private void propagateNewStatesToListeners() {
    if ( ! newStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : newStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleNewPublishedState(stateBundle);
                }
            }
            newStates.clear();
        }
    }
    if ( ! convergedStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : convergedStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleStateConvergedInCluster(stateBundle);
                }
            }
            convergedStates.clear();
        }
    }
}

/**
 * Runs at most one queued remote task. Tasks with a version-ACK dependency that did not fail
 * (and while we are master) are deferred until the next state recomputation.
 */
private boolean processNextQueuedRemoteTask() {
    metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
    RemoteClusterControllerTask task = remoteTasks.poll();
    if (task == null) {
        return false;
    }
    final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
    log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
    task.doRemoteFleetControllerTask(context);
    if (taskMayBeCompletedImmediately(task)) {
        log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
        task.notifyCompleted();
    } else {
        log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
        tasksPendingStateRecompute.add(task);
    }
    return true;
}

private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    // We cannot introduce a version barrier for tasks when we're not the master (or the task failed).
    return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
}

private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.masterInfo = new MasterInterface() {
        @Override
        public boolean isMaster() { return isMaster; }
        @Override
        public Integer getMaster() { return masterElectionHandler.getMaster(); }
        @Override
        public boolean inMasterMoratorium() { return inMasterMoratorium; }
    };
    context.nodeStateOrHostInfoChangeHandler = this;
    context.nodeAddedOrRemovedListener = this;
    return context;
}

/** Which version a node counts as having: activation ACK if deferred activation is on, else bundle ACK. */
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    return bundle.deferredActivation()
            ? nodeInfo.getClusterStateVersionActivationAcked()
            : nodeInfo.getClusterStateVersionBundleAcknowledged();
}

private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var bundle = systemStateBroadcaster.getClusterStateBundle();
    if (bundle == null) {
        return List.of();
    }
    return cluster.getNodeInfo().stream().
            filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
            map(NodeInfo::getNode).
            collect(Collectors.toList());
}

/** Joins list elements with ", ", truncating to {@code limit} entries with a "(... and N more)" suffix. */
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    if (list.size() > limit) {
        var sub = list.subList(0, limit);
        return String.format("%s (... and %d more)",
                sub.stream().map(E::toString).collect(Collectors.joining(", ")),
                list.size() - limit);
    } else {
        return list.stream().map(E::toString).collect(Collectors.joining(", "));
    }
}

private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (nodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
            taskConvergeVersion,
            stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}

/**
 * Completes deferred tasks whose minimum version has been ACKed cluster-wide, and fails
 * (DEADLINE_EXCEEDED) tasks whose wait deadline has expired. Relies on the queue being
 * ordered by minimum version, so processing stops at the first task that is neither.
 */
private boolean completeSatisfiedVersionDependentTasks() {
    int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
    long queueSizeBefore = taskCompletionQueue.size();
    final long now = timer.getCurrentTimeInMillis();
    while (!taskCompletionQueue.isEmpty()) {
        VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
        if (publishedVersion >= taskCompletion.getMinimumVersion()) {
            log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                    taskCompletion.getTask().getClass().getName(),
                    taskCompletion.getMinimumVersion(), publishedVersion));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
            var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
            log.log(Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                    taskCompletion.getTask().getClass().getName(), details));
            taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else {
            break;
        }
    }
    return (taskCompletionQueue.size() != queueSizeBefore);
}

/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is down: expose the latest candidate's node info under the published version number.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}

/*
  System test observations:
  - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
  - long time before content node state convergence (though this seems to be the case for legacy impl as well)
 */
private boolean resyncLocallyCachedState() throws InterruptedException {
    boolean didWork = false;
    if ( !
isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); stateChangeHandler.setStateChangedFlag(); } } isStateGatherer = true; return didWork; } private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) { systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle)); } private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) { return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported(); } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deferredActivation(options.enableTwoPhaseClusterStateActivation) .feedBlock(createResourceExhaustionCalculator() .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo())) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); invokeCandidateStateListeners(candidateBundle); final long timeNowMs = timer.getCurrentTimeInMillis(); if 
(hasPassedFirstStateBroadcastTimePoint(timeNowMs) && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) { final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). 
*/ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } private ClusterStateDeriver createBucketSpaceStateDeriver() { if (options.clusterHasGlobalDocumentTypes) { return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(), createDefaultSpaceMaintenanceTransitionConstraint()); } else { return createIdentityClonedBucketSpaceStateDeriver(); } } private ResourceExhaustionCalculator createResourceExhaustionCalculator() { return new ResourceExhaustionCalculator( options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit, stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(), options.clusterFeedBlockNoiseLevel); } private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() { return (state, space) -> state.clone(); } private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() { AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle() .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState()); return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState()); } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. 
*/ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis(); for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState, final ClusterStateBundle toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs) .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs())); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean atFirstClusterStateSendTimeEdge() { if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) { return false; } return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis()); } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged() || atFirstClusterStateSendTimeEdge(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { stateChangeHandler.setStateChangedFlag(); systemStateBroadcaster.resetBroadcastedClusterStateBundle(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); ClusterStateBundle previousBundle = database.getLatestClusterStateBundle(); database.loadStartTimestamps(cluster); database.loadWantedStates(databaseContext); log.info(() -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle)); stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; isMaster = true; inMasterMoratorium = true; log.log(Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); didWork = true; } if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { dropLeadershipState(); } metricUpdater.updateMasterState(isMaster); return didWork; } private void dropLeadershipState() { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; inMasterMoratorium = false; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(Level.FINE, () -> "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(Level.SEVERE, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { prepareShutdownEdge(); } } private void prepareShutdownEdge() { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { 
return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. 
*/ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getClusterStateVersionBundleAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else ++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
You need to declare explicit exception mapping for all exception types that are not `RestApiException` (or subclass). They will otherwise result in the default 500 response from jdisc.
private static RestApi createRestApiDefinition(LoadBalancersV1ApiHandler self) { return RestApi.builder() .addRoute(RestApi.route("/loadbalancers/v1") .get(self::getLoadBalancers)) .build(); }
.addRoute(RestApi.route("/loadbalancers/v1")
private static RestApi createRestApiDefinition(LoadBalancersV1ApiHandler self) { return RestApi.builder() .addRoute(RestApi.route("/loadbalancers/v1") .get(self::getLoadBalancers)) .build(); }
class LoadBalancersV1ApiHandler extends RestApiRequestHandler<LoadBalancersV1ApiHandler> { private final NodeRepository nodeRepository; @Inject public LoadBalancersV1ApiHandler(LoggingRequestHandler.Context parentCtx, NodeRepository nodeRepository) { super(parentCtx, LoadBalancersV1ApiHandler::createRestApiDefinition); this.nodeRepository = nodeRepository; } private HttpResponse getLoadBalancers(RestApi.RequestContext context) { return new LoadBalancersResponse(context.request(), nodeRepository); } }
class LoadBalancersV1ApiHandler extends RestApiRequestHandler<LoadBalancersV1ApiHandler> { private final NodeRepository nodeRepository; @Inject public LoadBalancersV1ApiHandler(LoggingRequestHandler.Context parentCtx, NodeRepository nodeRepository) { super(parentCtx, LoadBalancersV1ApiHandler::createRestApiDefinition); this.nodeRepository = nodeRepository; } private HttpResponse getLoadBalancers(RestApi.RequestContext context) { return new LoadBalancersResponse(context.request(), nodeRepository); } }
Thanks, will keep in mind.
private static RestApi createRestApiDefinition(LoadBalancersV1ApiHandler self) { return RestApi.builder() .addRoute(RestApi.route("/loadbalancers/v1") .get(self::getLoadBalancers)) .build(); }
.addRoute(RestApi.route("/loadbalancers/v1")
private static RestApi createRestApiDefinition(LoadBalancersV1ApiHandler self) { return RestApi.builder() .addRoute(RestApi.route("/loadbalancers/v1") .get(self::getLoadBalancers)) .build(); }
class LoadBalancersV1ApiHandler extends RestApiRequestHandler<LoadBalancersV1ApiHandler> { private final NodeRepository nodeRepository; @Inject public LoadBalancersV1ApiHandler(LoggingRequestHandler.Context parentCtx, NodeRepository nodeRepository) { super(parentCtx, LoadBalancersV1ApiHandler::createRestApiDefinition); this.nodeRepository = nodeRepository; } private HttpResponse getLoadBalancers(RestApi.RequestContext context) { return new LoadBalancersResponse(context.request(), nodeRepository); } }
class LoadBalancersV1ApiHandler extends RestApiRequestHandler<LoadBalancersV1ApiHandler> { private final NodeRepository nodeRepository; @Inject public LoadBalancersV1ApiHandler(LoggingRequestHandler.Context parentCtx, NodeRepository nodeRepository) { super(parentCtx, LoadBalancersV1ApiHandler::createRestApiDefinition); this.nodeRepository = nodeRepository; } private HttpResponse getLoadBalancers(RestApi.RequestContext context) { return new LoadBalancersResponse(context.request(), nodeRepository); } }
extra sync up front should not be needed
public void deconstruct() { transport.sync().shutdown().join(); }
transport.sync().shutdown().join();
public void deconstruct() { transport.shutdown().join(); }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public 
void bootstrapComplete() { } @Override @Override public List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." 
+ configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); orb.useSmallBuffers(); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } 
} } @Override public void bootstrapComplete() { } @Override @Override public List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. 
*/ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
should the orb be tuned for small buffers?
private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); }
this(transport, new Supervisor(transport), duperModel);
private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public void bootstrapComplete() { } @Override public void deconstruct() { transport.sync().shutdown().join(); } @Override public List<Mirror.Entry> 
lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." 
+ configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); orb.useSmallBuffers(); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public void bootstrapComplete() { } @Override public void deconstruct() { transport.shutdown().join(); } @Override public 
List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." 
+ configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
```suggestion transport.shutdown().join(); ```
public void deconstruct() { transport.sync().shutdown().join(); }
transport.sync().shutdown().join();
public void deconstruct() { transport.shutdown().join(); }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public 
void bootstrapComplete() { } @Override @Override public List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." 
+ configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); orb.useSmallBuffers(); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } 
} } @Override public void bootstrapComplete() { } @Override @Override public List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. 
*/ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
Perhaps. Ok.
private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); }
this(transport, new Supervisor(transport), duperModel);
private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public void bootstrapComplete() { } @Override public void deconstruct() { transport.sync().shutdown().join(); } @Override public List<Mirror.Entry> 
lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." 
+ configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); orb.useSmallBuffers(); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public void bootstrapComplete() { } @Override public void deconstruct() { transport.shutdown().join(); } @Override public 
List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." 
+ configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
```suggestion this(() -> new SlobrokMonitor(orb), transport, duperModel); orb.useSmallBuffers(); ```
private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); }
this(() -> new SlobrokMonitor(orb), transport, duperModel);
private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) { this(() -> new SlobrokMonitor(orb), transport, duperModel); orb.useSmallBuffers(); }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public void bootstrapComplete() { } @Override public void deconstruct() { transport.shutdown().join(); } @Override public List<Mirror.Entry> lookup(ApplicationId id, String 
pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." 
+ configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; private final Transport transport; private static int getTransportThreadCount() { return Math.max(4, Runtime.getRuntime().availableProcessors()); } @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel); } private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) { this(transport, new Supervisor(transport), duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.transport = transport; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(Level.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public void bootstrapComplete() { } @Override public void deconstruct() { transport.shutdown().join(); } @Override public List<Mirror.Entry> lookup(ApplicationId id, String 
pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." 
+ configId.s()); default: logger.log(Level.FINE, () -> "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } } }
`isEmpty`?
private boolean hasProductionDeployment(TenantName tenant) { return controller().applications().asList(tenant).stream() .map(Application::productionInstances) .anyMatch(prodInstances -> prodInstances.size()>0); }
.anyMatch(prodInstances -> prodInstances.size()>0);
private boolean hasProductionDeployment(TenantName tenant) { return controller().applications().asList(tenant).stream() .map(Application::productionInstances) .noneMatch(Map::isEmpty); }
class TenantRoleMaintainer extends ControllerMaintainer { public TenantRoleMaintainer(Controller controller, Duration tenantRoleMaintainer) { super(controller, tenantRoleMaintainer); } @Override protected boolean maintain() { var roleService = controller().serviceRegistry().roleService(); var tenants = controller().tenants().asList(); var tenantsWithRoles = tenants.stream() .map(Tenant::name) .filter(this::hasProductionDeployment) .collect(Collectors.toList()); roleService.maintainRoles(tenantsWithRoles); return true; } }
class TenantRoleMaintainer extends ControllerMaintainer { public TenantRoleMaintainer(Controller controller, Duration tenantRoleMaintainer) { super(controller, tenantRoleMaintainer); } @Override protected boolean maintain() { var roleService = controller().serviceRegistry().roleService(); var tenants = controller().tenants().asList(); var tenantsWithRoles = tenants.stream() .map(Tenant::name) .filter(this::hasProductionDeployment) .collect(Collectors.toList()); roleService.maintainRoles(tenantsWithRoles); return true; } }
Can be simplified to `planCounter.merge(planId.value(), 1, Integer::sum)`.
private void reportTenantMetrics() { if (! controller().system().isPublic()) return; var planCounter = new TreeMap<String, Integer>(); controller().tenants().asList().forEach(tenant -> { var planId = controller().serviceRegistry().billingController().getPlan(tenant.name()); planCounter.computeIfPresent(planId.value(), (k, v) -> v + 1); planCounter.putIfAbsent(planId.value(), 1); }); planCounter.forEach((planId, count) -> { var context = metric.createContext(Map.of("plan", planId)); metric.set(TENANT_METRIC, count, context); }); }
planCounter.putIfAbsent(planId.value(), 1);
private void reportTenantMetrics() { if (! controller().system().isPublic()) return; var planCounter = new TreeMap<String, Integer>(); controller().tenants().asList().forEach(tenant -> { var planId = controller().serviceRegistry().billingController().getPlan(tenant.name()); planCounter.merge(planId.value(), 1, Integer::sum); }); planCounter.forEach((planId, count) -> { var context = metric.createContext(Map.of("plan", planId)); metric.set(TENANT_METRIC, count, context); }); }
class MetricsReporter extends ControllerMaintainer { public static final String TENANT_METRIC = "tenant"; public static final String DEPLOYMENT_FAIL_METRIC = "deployment.failurePercentage"; public static final String DEPLOYMENT_AVERAGE_DURATION = "deployment.averageDuration"; public static final String DEPLOYMENT_FAILING_UPGRADES = "deployment.failingUpgrades"; public static final String DEPLOYMENT_BUILD_AGE_SECONDS = "deployment.buildAgeSeconds"; public static final String DEPLOYMENT_WARNINGS = "deployment.warnings"; public static final String OS_CHANGE_DURATION = "deployment.osChangeDuration"; public static final String PLATFORM_CHANGE_DURATION = "deployment.platformChangeDuration"; public static final String OS_NODE_COUNT = "deployment.nodeCountByOsVersion"; public static final String PLATFORM_NODE_COUNT = "deployment.nodeCountByPlatformVersion"; public static final String BROKEN_SYSTEM_VERSION = "deployment.brokenSystemVersion"; public static final String REMAINING_ROTATIONS = "remaining_rotations"; public static final String NAME_SERVICE_REQUESTS_QUEUED = "dns.queuedRequests"; public static final String OPERATION_PREFIX = "operation."; private final Metric metric; private final Clock clock; private final ConcurrentHashMap<NodeCountKey, Long> nodeCounts = new ConcurrentHashMap<>(); public MetricsReporter(Controller controller, Metric metric) { super(controller, Duration.ofMinutes(1)); this.metric = metric; this.clock = controller.clock(); } @Override public boolean maintain() { reportDeploymentMetrics(); reportRemainingRotations(); reportQueuedNameServiceRequests(); VersionStatus versionStatus = controller().readVersionStatus(); reportInfrastructureUpgradeMetrics(versionStatus); reportAuditLog(); reportBrokenSystemVersion(versionStatus); reportTenantMetrics(); return true; } private void reportBrokenSystemVersion(VersionStatus versionStatus) { Version systemVersion = controller().systemVersion(versionStatus); VespaVersion.Confidence confidence = 
versionStatus.version(systemVersion).confidence(); int isBroken = confidence == VespaVersion.Confidence.broken ? 1 : 0; metric.set(BROKEN_SYSTEM_VERSION, isBroken, metric.createContext(Map.of())); } private void reportAuditLog() { AuditLog log = controller().auditLogger().readLog(); HashMap<String, HashMap<String, Integer>> metricCounts = new HashMap<>(); for (AuditLog.Entry entry : log.entries()) { String[] resource = entry.resource().split("/"); if((resource.length > 1) && (resource[1] != null)) { String api = resource[1]; String operationMetric = OPERATION_PREFIX + api; HashMap<String, Integer> dimension = metricCounts.get(operationMetric); if (dimension != null) { Integer count = dimension.get(entry.principal()); if (count != null) { dimension.replace(entry.principal(), ++count); } else { dimension.put(entry.principal(), 1); } } else { dimension = new HashMap<>(); dimension.put(entry.principal(),1); metricCounts.put(operationMetric, dimension); } } } for (String operationMetric : metricCounts.keySet()) { for (String userDimension : metricCounts.get(operationMetric).keySet()) { metric.set(operationMetric, (metricCounts.get(operationMetric)).get(userDimension), metric.createContext(Map.of("operator", userDimension))); } } } private void reportInfrastructureUpgradeMetrics(VersionStatus versionStatus) { Map<NodeVersion, Duration> osChangeDurations = osChangeDurations(); Map<NodeVersion, Duration> platformChangeDurations = platformChangeDurations(versionStatus); reportChangeDurations(osChangeDurations, OS_CHANGE_DURATION); reportChangeDurations(platformChangeDurations, PLATFORM_CHANGE_DURATION); reportNodeCount(osChangeDurations.keySet(), OS_NODE_COUNT); reportNodeCount(platformChangeDurations.keySet(), PLATFORM_NODE_COUNT); } private void reportRemainingRotations() { try (RotationLock lock = controller().routing().rotations().lock()) { int availableRotations = controller().routing().rotations().availableRotations(lock).size(); metric.set(REMAINING_ROTATIONS, 
availableRotations, metric.createContext(Map.of())); } } private void reportDeploymentMetrics() { ApplicationList applications = ApplicationList.from(controller().applications().readable()) .withProductionDeployment(); DeploymentStatusList deployments = controller().jobController().deploymentStatuses(applications); metric.set(DEPLOYMENT_FAIL_METRIC, deploymentFailRatio(deployments) * 100, metric.createContext(Map.of())); averageDeploymentDurations(deployments, clock.instant()).forEach((instance, duration) -> { metric.set(DEPLOYMENT_AVERAGE_DURATION, duration.getSeconds(), metric.createContext(dimensions(instance))); }); deploymentsFailingUpgrade(deployments).forEach((instance, failingJobs) -> { metric.set(DEPLOYMENT_FAILING_UPGRADES, failingJobs, metric.createContext(dimensions(instance))); }); deploymentWarnings(deployments).forEach((application, warnings) -> { metric.set(DEPLOYMENT_WARNINGS, warnings, metric.createContext(dimensions(application))); }); for (Application application : applications.asList()) application.latestVersion() .flatMap(ApplicationVersion::buildTime) .ifPresent(buildTime -> metric.set(DEPLOYMENT_BUILD_AGE_SECONDS, controller().clock().instant().getEpochSecond() - buildTime.getEpochSecond(), metric.createContext(dimensions(application.id().defaultInstance())))); } private void reportQueuedNameServiceRequests() { metric.set(NAME_SERVICE_REQUESTS_QUEUED, controller().curator().readNameServiceQueue().requests().size(), metric.createContext(Map.of())); } private void reportNodeCount(Set<NodeVersion> nodeVersions, String metricName) { Map<NodeCountKey, Long> newNodeCounts = nodeVersions.stream() .collect(Collectors.groupingBy(nodeVersion -> { return new NodeCountKey(metricName, nodeVersion.currentVersion(), nodeVersion.zone()); }, Collectors.counting())); nodeCounts.putAll(newNodeCounts); nodeCounts.forEach((key, count) -> { if (newNodeCounts.containsKey(key)) { metric.set(metricName, count, metric.createContext(dimensions(key.zone, 
key.version))); } else if (key.metricName.equals(metricName)) { metric.set(metricName, 0, metric.createContext(dimensions(key.zone, key.version))); } }); } private void reportChangeDurations(Map<NodeVersion, Duration> changeDurations, String metricName) { changeDurations.forEach((nodeVersion, duration) -> { metric.set(metricName, duration.toSeconds(), metric.createContext(dimensions(nodeVersion.hostname(), nodeVersion.zone()))); }); } private Map<NodeVersion, Duration> platformChangeDurations(VersionStatus versionStatus) { return changeDurations(versionStatus.versions(), VespaVersion::nodeVersions); } private Map<NodeVersion, Duration> osChangeDurations() { return changeDurations(controller().osVersionStatus().versions().values(), Function.identity()); } private <V> Map<NodeVersion, Duration> changeDurations(Collection<V> versions, Function<V, NodeVersions> versionsGetter) { var now = clock.instant(); var durations = new HashMap<NodeVersion, Duration>(); for (var version : versions) { for (var nodeVersion : versionsGetter.apply(version).asMap().values()) { durations.put(nodeVersion, nodeVersion.changeDuration(now)); } } return durations; } private static double deploymentFailRatio(DeploymentStatusList statuses) { return statuses.asList().stream() .mapToInt(status -> status.hasFailures() ? 
1 : 0) .average().orElse(0); } private static Map<ApplicationId, Duration> averageDeploymentDurations(DeploymentStatusList statuses, Instant now) { return statuses.asList().stream() .flatMap(status -> status.instanceJobs().entrySet().stream()) .collect(Collectors.toUnmodifiableMap(entry -> entry.getKey(), entry -> averageDeploymentDuration(entry.getValue(), now))); } private static Map<ApplicationId, Integer> deploymentsFailingUpgrade(DeploymentStatusList statuses) { return statuses.asList().stream() .flatMap(status -> status.instanceJobs().entrySet().stream()) .collect(Collectors.toUnmodifiableMap(entry -> entry.getKey(), entry -> deploymentsFailingUpgrade(entry.getValue()))); } private static int deploymentsFailingUpgrade(JobList jobs) { return jobs.failing().not().failingApplicationChange().size(); } private static Duration averageDeploymentDuration(JobList jobs, Instant now) { List<Duration> jobDurations = jobs.lastTriggered() .mapToList(run -> Duration.between(run.start(), run.end().orElse(now))); return jobDurations.stream() .reduce(Duration::plus) .map(totalDuration -> totalDuration.dividedBy(jobDurations.size())) .orElse(Duration.ZERO); } private static Map<ApplicationId, Integer> deploymentWarnings(DeploymentStatusList statuses) { return statuses.asList().stream() .flatMap(status -> status.application().instances().values().stream()) .collect(Collectors.toMap(Instance::id, a -> maxWarningCountOf(a.deployments().values()))); } private static int maxWarningCountOf(Collection<Deployment> deployments) { return deployments.stream() .map(Deployment::metrics) .map(DeploymentMetrics::warnings) .map(Map::values) .flatMap(Collection::stream) .max(Integer::compareTo) .orElse(0); } private static Map<String, String> dimensions(ApplicationId application) { return Map.of("tenant", application.tenant().value(), "app", application.application().value() + "." 
+ application.instance().value(), "applicationId", application.toFullString()); } private static Map<String, String> dimensions(HostName hostname, ZoneId zone) { return Map.of("host", hostname.value(), "zone", zone.value()); } private static Map<String, String> dimensions(ZoneId zone, Version currentVersion) { return Map.of("zone", zone.value(), "currentVersion", currentVersion.toFullString()); } private static class NodeCountKey { private final String metricName; private final Version version; private final ZoneId zone; public NodeCountKey(String metricName, Version version, ZoneId zone) { this.metricName = metricName; this.version = version; this.zone = zone; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NodeCountKey that = (NodeCountKey) o; return metricName.equals(that.metricName) && version.equals(that.version) && zone.equals(that.zone); } @Override public int hashCode() { return Objects.hash(metricName, version, zone); } } }
class MetricsReporter extends ControllerMaintainer { public static final String TENANT_METRIC = "billing.tenants"; public static final String DEPLOYMENT_FAIL_METRIC = "deployment.failurePercentage"; public static final String DEPLOYMENT_AVERAGE_DURATION = "deployment.averageDuration"; public static final String DEPLOYMENT_FAILING_UPGRADES = "deployment.failingUpgrades"; public static final String DEPLOYMENT_BUILD_AGE_SECONDS = "deployment.buildAgeSeconds"; public static final String DEPLOYMENT_WARNINGS = "deployment.warnings"; public static final String OS_CHANGE_DURATION = "deployment.osChangeDuration"; public static final String PLATFORM_CHANGE_DURATION = "deployment.platformChangeDuration"; public static final String OS_NODE_COUNT = "deployment.nodeCountByOsVersion"; public static final String PLATFORM_NODE_COUNT = "deployment.nodeCountByPlatformVersion"; public static final String BROKEN_SYSTEM_VERSION = "deployment.brokenSystemVersion"; public static final String REMAINING_ROTATIONS = "remaining_rotations"; public static final String NAME_SERVICE_REQUESTS_QUEUED = "dns.queuedRequests"; public static final String OPERATION_PREFIX = "operation."; private final Metric metric; private final Clock clock; private final ConcurrentHashMap<NodeCountKey, Long> nodeCounts = new ConcurrentHashMap<>(); public MetricsReporter(Controller controller, Metric metric) { super(controller, Duration.ofMinutes(1)); this.metric = metric; this.clock = controller.clock(); } @Override public boolean maintain() { reportDeploymentMetrics(); reportRemainingRotations(); reportQueuedNameServiceRequests(); VersionStatus versionStatus = controller().readVersionStatus(); reportInfrastructureUpgradeMetrics(versionStatus); reportAuditLog(); reportBrokenSystemVersion(versionStatus); reportTenantMetrics(); return true; } private void reportBrokenSystemVersion(VersionStatus versionStatus) { Version systemVersion = controller().systemVersion(versionStatus); VespaVersion.Confidence confidence = 
versionStatus.version(systemVersion).confidence(); int isBroken = confidence == VespaVersion.Confidence.broken ? 1 : 0; metric.set(BROKEN_SYSTEM_VERSION, isBroken, metric.createContext(Map.of())); } private void reportAuditLog() { AuditLog log = controller().auditLogger().readLog(); HashMap<String, HashMap<String, Integer>> metricCounts = new HashMap<>(); for (AuditLog.Entry entry : log.entries()) { String[] resource = entry.resource().split("/"); if((resource.length > 1) && (resource[1] != null)) { String api = resource[1]; String operationMetric = OPERATION_PREFIX + api; HashMap<String, Integer> dimension = metricCounts.get(operationMetric); if (dimension != null) { Integer count = dimension.get(entry.principal()); if (count != null) { dimension.replace(entry.principal(), ++count); } else { dimension.put(entry.principal(), 1); } } else { dimension = new HashMap<>(); dimension.put(entry.principal(),1); metricCounts.put(operationMetric, dimension); } } } for (String operationMetric : metricCounts.keySet()) { for (String userDimension : metricCounts.get(operationMetric).keySet()) { metric.set(operationMetric, (metricCounts.get(operationMetric)).get(userDimension), metric.createContext(Map.of("operator", userDimension))); } } } private void reportInfrastructureUpgradeMetrics(VersionStatus versionStatus) { Map<NodeVersion, Duration> osChangeDurations = osChangeDurations(); Map<NodeVersion, Duration> platformChangeDurations = platformChangeDurations(versionStatus); reportChangeDurations(osChangeDurations, OS_CHANGE_DURATION); reportChangeDurations(platformChangeDurations, PLATFORM_CHANGE_DURATION); reportNodeCount(osChangeDurations.keySet(), OS_NODE_COUNT); reportNodeCount(platformChangeDurations.keySet(), PLATFORM_NODE_COUNT); } private void reportRemainingRotations() { try (RotationLock lock = controller().routing().rotations().lock()) { int availableRotations = controller().routing().rotations().availableRotations(lock).size(); metric.set(REMAINING_ROTATIONS, 
availableRotations, metric.createContext(Map.of())); } } private void reportDeploymentMetrics() { ApplicationList applications = ApplicationList.from(controller().applications().readable()) .withProductionDeployment(); DeploymentStatusList deployments = controller().jobController().deploymentStatuses(applications); metric.set(DEPLOYMENT_FAIL_METRIC, deploymentFailRatio(deployments) * 100, metric.createContext(Map.of())); averageDeploymentDurations(deployments, clock.instant()).forEach((instance, duration) -> { metric.set(DEPLOYMENT_AVERAGE_DURATION, duration.getSeconds(), metric.createContext(dimensions(instance))); }); deploymentsFailingUpgrade(deployments).forEach((instance, failingJobs) -> { metric.set(DEPLOYMENT_FAILING_UPGRADES, failingJobs, metric.createContext(dimensions(instance))); }); deploymentWarnings(deployments).forEach((application, warnings) -> { metric.set(DEPLOYMENT_WARNINGS, warnings, metric.createContext(dimensions(application))); }); for (Application application : applications.asList()) application.latestVersion() .flatMap(ApplicationVersion::buildTime) .ifPresent(buildTime -> metric.set(DEPLOYMENT_BUILD_AGE_SECONDS, controller().clock().instant().getEpochSecond() - buildTime.getEpochSecond(), metric.createContext(dimensions(application.id().defaultInstance())))); } private void reportQueuedNameServiceRequests() { metric.set(NAME_SERVICE_REQUESTS_QUEUED, controller().curator().readNameServiceQueue().requests().size(), metric.createContext(Map.of())); } private void reportNodeCount(Set<NodeVersion> nodeVersions, String metricName) { Map<NodeCountKey, Long> newNodeCounts = nodeVersions.stream() .collect(Collectors.groupingBy(nodeVersion -> { return new NodeCountKey(metricName, nodeVersion.currentVersion(), nodeVersion.zone()); }, Collectors.counting())); nodeCounts.putAll(newNodeCounts); nodeCounts.forEach((key, count) -> { if (newNodeCounts.containsKey(key)) { metric.set(metricName, count, metric.createContext(dimensions(key.zone, 
key.version))); } else if (key.metricName.equals(metricName)) { metric.set(metricName, 0, metric.createContext(dimensions(key.zone, key.version))); } }); } private void reportChangeDurations(Map<NodeVersion, Duration> changeDurations, String metricName) { changeDurations.forEach((nodeVersion, duration) -> { metric.set(metricName, duration.toSeconds(), metric.createContext(dimensions(nodeVersion.hostname(), nodeVersion.zone()))); }); } private Map<NodeVersion, Duration> platformChangeDurations(VersionStatus versionStatus) { return changeDurations(versionStatus.versions(), VespaVersion::nodeVersions); } private Map<NodeVersion, Duration> osChangeDurations() { return changeDurations(controller().osVersionStatus().versions().values(), Function.identity()); } private <V> Map<NodeVersion, Duration> changeDurations(Collection<V> versions, Function<V, NodeVersions> versionsGetter) { var now = clock.instant(); var durations = new HashMap<NodeVersion, Duration>(); for (var version : versions) { for (var nodeVersion : versionsGetter.apply(version).asMap().values()) { durations.put(nodeVersion, nodeVersion.changeDuration(now)); } } return durations; } private static double deploymentFailRatio(DeploymentStatusList statuses) { return statuses.asList().stream() .mapToInt(status -> status.hasFailures() ? 
1 : 0) .average().orElse(0); } private static Map<ApplicationId, Duration> averageDeploymentDurations(DeploymentStatusList statuses, Instant now) { return statuses.asList().stream() .flatMap(status -> status.instanceJobs().entrySet().stream()) .collect(Collectors.toUnmodifiableMap(entry -> entry.getKey(), entry -> averageDeploymentDuration(entry.getValue(), now))); } private static Map<ApplicationId, Integer> deploymentsFailingUpgrade(DeploymentStatusList statuses) { return statuses.asList().stream() .flatMap(status -> status.instanceJobs().entrySet().stream()) .collect(Collectors.toUnmodifiableMap(entry -> entry.getKey(), entry -> deploymentsFailingUpgrade(entry.getValue()))); } private static int deploymentsFailingUpgrade(JobList jobs) { return jobs.failing().not().failingApplicationChange().size(); } private static Duration averageDeploymentDuration(JobList jobs, Instant now) { List<Duration> jobDurations = jobs.lastTriggered() .mapToList(run -> Duration.between(run.start(), run.end().orElse(now))); return jobDurations.stream() .reduce(Duration::plus) .map(totalDuration -> totalDuration.dividedBy(jobDurations.size())) .orElse(Duration.ZERO); } private static Map<ApplicationId, Integer> deploymentWarnings(DeploymentStatusList statuses) { return statuses.asList().stream() .flatMap(status -> status.application().instances().values().stream()) .collect(Collectors.toMap(Instance::id, a -> maxWarningCountOf(a.deployments().values()))); } private static int maxWarningCountOf(Collection<Deployment> deployments) { return deployments.stream() .map(Deployment::metrics) .map(DeploymentMetrics::warnings) .map(Map::values) .flatMap(Collection::stream) .max(Integer::compareTo) .orElse(0); } private static Map<String, String> dimensions(ApplicationId application) { return Map.of("tenant", application.tenant().value(), "app", application.application().value() + "." 
+ application.instance().value(), "applicationId", application.toFullString()); } private static Map<String, String> dimensions(HostName hostname, ZoneId zone) { return Map.of("host", hostname.value(), "zone", zone.value()); } private static Map<String, String> dimensions(ZoneId zone, Version currentVersion) { return Map.of("zone", zone.value(), "currentVersion", currentVersion.toFullString()); } private static class NodeCountKey { private final String metricName; private final Version version; private final ZoneId zone; public NodeCountKey(String metricName, Version version, ZoneId zone) { this.metricName = metricName; this.version = version; this.zone = zone; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NodeCountKey that = (NodeCountKey) o; return metricName.equals(that.metricName) && version.equals(that.version) && zone.equals(that.zone); } @Override public int hashCode() { return Objects.hash(metricName, version, zone); } } }
You should probably use <= 0 in case there is some capping to positive number. As Hjallis said. "Me kan aldri gå på 0"
public Result doSearch2(Query query, Execution execution) { if (query.getTimeLeft() < 0) { return new Result(query, ErrorMessage.createTimeout(String.format("No time left for searching (timeout=%d)", query.getTimeout()))); } initializeMissingQueryFields(query); if (documentSelectionQueryParameterCount(query) != 1) { return new Result(query, ErrorMessage.createBackendCommunicationError("Streaming search needs one and " + "only one of these query parameters to be set: streaming.userid, streaming.groupname, " + "streaming.selection")); } query.trace("Routing to search cluster " + getSearchClusterConfigId() + " and document type " + documentType, 4); long timeStartedNanos = tracingOptions.getClock().nanoTimeNow(); int effectiveTraceLevel = inferEffectiveQueryTraceLevel(query); Visitor visitor = visitorFactory.createVisitor(query, getSearchClusterConfigId(), route, documentType, effectiveTraceLevel); try { visitor.doSearch(); } catch (ParseException e) { return new Result(query, ErrorMessage.createBackendCommunicationError( "Failed to parse document selection string: " + e.getMessage() + "'.")); } catch (TokenMgrException e) { return new Result(query, ErrorMessage.createBackendCommunicationError( "Failed to tokenize document selection string: " + e.getMessage() + "'.")); } catch (TimeoutException e) { double elapsedMillis = durationInMillisFromNanoTime(timeStartedNanos); if ((effectiveTraceLevel > 0) && timeoutBadEnoughToBeReported(query, elapsedMillis)) { tracingOptions.getTraceExporter().maybeExport(() -> new TraceDescription(visitor.getTrace(), String.format("Trace of %s which timed out after %.3g seconds", query.toString(), elapsedMillis / 1000.0))); } return new Result(query, ErrorMessage.createTimeout(e.getMessage())); } catch (InterruptedException | IllegalArgumentException e) { return new Result(query, ErrorMessage.createBackendCommunicationError(e.getMessage())); } return buildResultFromCompletedVisitor(query, visitor); }
if (query.getTimeLeft() < 0) {
public Result doSearch2(Query query, Execution execution) { if (query.getTimeLeft() <= 0) { return new Result(query, ErrorMessage.createTimeout(String.format("No time left for searching (timeout=%d)", query.getTimeout()))); } initializeMissingQueryFields(query); if (documentSelectionQueryParameterCount(query) != 1) { return new Result(query, ErrorMessage.createBackendCommunicationError("Streaming search needs one and " + "only one of these query parameters to be set: streaming.userid, streaming.groupname, " + "streaming.selection")); } query.trace("Routing to search cluster " + getSearchClusterConfigId() + " and document type " + documentType, 4); long timeStartedNanos = tracingOptions.getClock().nanoTimeNow(); int effectiveTraceLevel = inferEffectiveQueryTraceLevel(query); Visitor visitor = visitorFactory.createVisitor(query, getSearchClusterConfigId(), route, documentType, effectiveTraceLevel); try { visitor.doSearch(); } catch (ParseException e) { return new Result(query, ErrorMessage.createBackendCommunicationError( "Failed to parse document selection string: " + e.getMessage() + "'.")); } catch (TokenMgrException e) { return new Result(query, ErrorMessage.createBackendCommunicationError( "Failed to tokenize document selection string: " + e.getMessage() + "'.")); } catch (TimeoutException e) { double elapsedMillis = durationInMillisFromNanoTime(timeStartedNanos); if ((effectiveTraceLevel > 0) && timeoutBadEnoughToBeReported(query, elapsedMillis)) { tracingOptions.getTraceExporter().maybeExport(() -> new TraceDescription(visitor.getTrace(), String.format("Trace of %s which timed out after %.3g seconds", query.toString(), elapsedMillis / 1000.0))); } return new Result(query, ErrorMessage.createTimeout(e.getMessage())); } catch (InterruptedException | IllegalArgumentException e) { return new Result(query, ErrorMessage.createBackendCommunicationError(e.getMessage())); } return buildResultFromCompletedVisitor(query, visitor); }
class VdsVisitorFactory implements VisitorFactory { @Override public Visitor createVisitor(Query query, String searchCluster, Route route, String documentType, int traceLevelOverride) { return new VdsVisitor(query, searchCluster, route, documentType, traceLevelOverride); } }
class VdsVisitorFactory implements VisitorFactory { @Override public Visitor createVisitor(Query query, String searchCluster, Route route, String documentType, int traceLevelOverride) { return new VdsVisitor(query, searchCluster, route, documentType, traceLevelOverride); } }
not .getName() as for ClusterStatus ?
public ContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState, boolean zooKeeperLocalhostAffinity) { super(parent, configSubId); this.name = clusterId; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); this.zooKeeperLocalhostAffinity = zooKeeperLocalhostAffinity; componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addComponent(new DefaultThreadpoolProvider(this)); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider"); addSimpleComponent(com.yahoo.jdisc.http.server.jetty.Janitor.class); addJaxProviders(); }
addSimpleComponent(com.yahoo.jdisc.http.server.jetty.Janitor.class);
public ContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState, boolean zooKeeperLocalhostAffinity) { super(parent, configSubId); this.name = clusterId; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); this.zooKeeperLocalhostAffinity = zooKeeperLocalhostAffinity; componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addComponent(new DefaultThreadpoolProvider(this)); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider"); addSimpleComponent(com.yahoo.jdisc.http.server.jetty.Janitor.class); addJaxProviders(); }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, ConfigserverConfig.Producer, CuratorConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private final boolean zooKeeperLocalhostAffinity; private final Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private boolean deferChangesUntilRestart = false; public ClusterSpec.Id id() { return ClusterSpec.Id.from(getName()); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new 
ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void 
addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setOwner(this); container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.setApplyOnRestart(getDeferChangesUntilRestart()); builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(256) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) 
containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { var compressionType = isHostedVespa ? AccessLogComponent.CompressionType.ZSTD : AccessLogComponent.CompressionType.GZIP; addComponent(new AccessLogComponent(this, AccessLogComponent.AccessLogType.jsonAccessLog, compressionType, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are running by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } @Override public void getConfig(CuratorConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (var container : containers) { builder.server(new CuratorConfig.Server.Builder().hostname(container.getHostResource().getHostname())); } builder.zookeeperLocalhostAffinity(zooKeeperLocalhostAffinity); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public String getEnvironmentVars() { return environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); /** * Mark whether the config emitted by this cluster currently should be applied by clients already running with * a previous generation of it only by restarting the consuming processes. */ public void setDeferChangesUntilRestart(boolean deferChangesUntilRestart) { this.deferChangesUntilRestart = deferChangesUntilRestart; } public boolean getDeferChangesUntilRestart() { return deferChangesUntilRestart; } /** Effective vcpu for the containers in cluster. Use this value as scale factor for performance/resource tuning. **/ public OptionalDouble vcpu() { return getContainers().stream() .filter(c -> c.getHostResource() != null && c.getHostResource().realResources() != null) .mapToDouble(c -> c.getHostResource().realResources().vcpu()) .max(); } }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, ConfigserverConfig.Producer, CuratorConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private final boolean zooKeeperLocalhostAffinity; private final Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private boolean deferChangesUntilRestart = false; public ClusterSpec.Id id() { return ClusterSpec.Id.from(getName()); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new 
ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void 
addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setOwner(this); container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.setApplyOnRestart(getDeferChangesUntilRestart()); builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(256) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) 
containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { var compressionType = isHostedVespa ? AccessLogComponent.CompressionType.ZSTD : AccessLogComponent.CompressionType.GZIP; addComponent(new AccessLogComponent(this, AccessLogComponent.AccessLogType.jsonAccessLog, compressionType, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are running by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } @Override public void getConfig(CuratorConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (var container : containers) { builder.server(new CuratorConfig.Server.Builder().hostname(container.getHostResource().getHostname())); } builder.zookeeperLocalhostAffinity(zooKeeperLocalhostAffinity); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public String getEnvironmentVars() { return environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); /** * Mark whether the config emitted by this cluster currently should be applied by clients already running with * a previous generation of it only by restarting the consuming processes. */ public void setDeferChangesUntilRestart(boolean deferChangesUntilRestart) { this.deferChangesUntilRestart = deferChangesUntilRestart; } public boolean getDeferChangesUntilRestart() { return deferChangesUntilRestart; } /** Effective vcpu for the containers in cluster. Use this value as scale factor for performance/resource tuning. **/ public OptionalDouble vcpu() { return getContainers().stream() .filter(c -> c.getHostResource() != null && c.getHostResource().realResources() != null) .mapToDouble(c -> c.getHostResource().realResources().vcpu()) .max(); } }
``` private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } ```
public ContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState, boolean zooKeeperLocalhostAffinity) { super(parent, configSubId); this.name = clusterId; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); this.zooKeeperLocalhostAffinity = zooKeeperLocalhostAffinity; componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addComponent(new DefaultThreadpoolProvider(this)); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider"); addSimpleComponent(com.yahoo.jdisc.http.server.jetty.Janitor.class); addJaxProviders(); }
addSimpleComponent(com.yahoo.jdisc.http.server.jetty.Janitor.class);
public ContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState, boolean zooKeeperLocalhostAffinity) { super(parent, configSubId); this.name = clusterId; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); this.zooKeeperLocalhostAffinity = zooKeeperLocalhostAffinity; componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addComponent(new DefaultThreadpoolProvider(this)); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider"); addSimpleComponent(com.yahoo.jdisc.http.server.jetty.Janitor.class); addJaxProviders(); }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, ConfigserverConfig.Producer, CuratorConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private final boolean zooKeeperLocalhostAffinity; private final Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private boolean deferChangesUntilRestart = false; public ClusterSpec.Id id() { return ClusterSpec.Id.from(getName()); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new 
ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void 
addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setOwner(this); container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.setApplyOnRestart(getDeferChangesUntilRestart()); builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(256) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) 
containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { var compressionType = isHostedVespa ? AccessLogComponent.CompressionType.ZSTD : AccessLogComponent.CompressionType.GZIP; addComponent(new AccessLogComponent(this, AccessLogComponent.AccessLogType.jsonAccessLog, compressionType, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are running by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } @Override public void getConfig(CuratorConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (var container : containers) { builder.server(new CuratorConfig.Server.Builder().hostname(container.getHostResource().getHostname())); } builder.zookeeperLocalhostAffinity(zooKeeperLocalhostAffinity); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public String getEnvironmentVars() { return environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); /** * Mark whether the config emitted by this cluster currently should be applied by clients already running with * a previous generation of it only by restarting the consuming processes. */ public void setDeferChangesUntilRestart(boolean deferChangesUntilRestart) { this.deferChangesUntilRestart = deferChangesUntilRestart; } public boolean getDeferChangesUntilRestart() { return deferChangesUntilRestart; } /** Effective vcpu for the containers in cluster. Use this value as scale factor for performance/resource tuning. **/ public OptionalDouble vcpu() { return getContainers().stream() .filter(c -> c.getHostResource() != null && c.getHostResource().realResources() != null) .mapToDouble(c -> c.getHostResource().realResources().vcpu()) .max(); } }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, ConfigserverConfig.Producer, CuratorConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private final boolean zooKeeperLocalhostAffinity; private final Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private boolean deferChangesUntilRestart = false; public ClusterSpec.Id id() { return ClusterSpec.Id.from(getName()); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new 
ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void 
addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setOwner(this); container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.setApplyOnRestart(getDeferChangesUntilRestart()); builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(256) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) 
containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { var compressionType = isHostedVespa ? AccessLogComponent.CompressionType.ZSTD : AccessLogComponent.CompressionType.GZIP; addComponent(new AccessLogComponent(this, AccessLogComponent.AccessLogType.jsonAccessLog, compressionType, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are running by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } @Override public void getConfig(CuratorConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (var container : containers) { builder.server(new CuratorConfig.Server.Builder().hostname(container.getHostResource().getHostname())); } builder.zookeeperLocalhostAffinity(zooKeeperLocalhostAffinity); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public String getEnvironmentVars() { return environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); /** * Mark whether the config emitted by this cluster currently should be applied by clients already running with * a previous generation of it only by restarting the consuming processes. */ public void setDeferChangesUntilRestart(boolean deferChangesUntilRestart) { this.deferChangesUntilRestart = deferChangesUntilRestart; } public boolean getDeferChangesUntilRestart() { return deferChangesUntilRestart; } /** Effective vcpu for the containers in cluster. Use this value as scale factor for performance/resource tuning. **/ public OptionalDouble vcpu() { return getContainers().stream() .filter(c -> c.getHostResource() != null && c.getHostResource().realResources() != null) .mapToDouble(c -> c.getHostResource().realResources().vcpu()) .max(); } }
This will be `""` if the field is not set; `null` would be a better choice, since every other optional field in this method maps an absent value to `null`.
/**
 * Deserializes prepare parameters from a JSON request body.
 *
 * @param json           the raw JSON payload
 * @param tenant         the tenant owning the application
 * @param barrierTimeout fallback timeout used when the request does not specify one
 * @return the parsed {@link PrepareParams}
 */
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) {
    Slime slime = SlimeUtils.jsonToSlime(json);
    Inspector params = slime.get();
    return new Builder().ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool())
                        .dryRun(params.field(DRY_RUN_PARAM_NAME).asBool())
                        .verbose(params.field(VERBOSE_PARAM_NAME).asBool())
                        .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout)))
                        .applicationId(createApplicationId(params, tenant))
                        // Map an absent field to null (asString() would yield ""), consistent
                        // with how every other optional string field below is handled.
                        .vespaVersion(SlimeUtils.optionalString(params.field(VESPA_VERSION_PARAM_NAME)).orElse(null))
                        .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME),
                                                           ContainerEndpointSerializer::endpointListFromSlime,
                                                           Collections.emptyList()))
                        .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME),
                                                                 EndpointCertificateMetadataSerializer::fromSlime))
                        .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null))
                        .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null))
                        .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null),
                                                                      SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null)))
                        .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime))
                        .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null))
                        .force(params.field(FORCE_PARAM_NAME).asBool())
                        .waitForResourcesInPrepare(params.field(WAIT_FOR_RESOURCES_IN_PREPARE).asBool())
                        .build();
}
.vespaVersion(params.field(VESPA_VERSION_PARAM_NAME).asString())
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) { Slime slime = SlimeUtils.jsonToSlimeOrThrow(json); Inspector params = slime.get(); return new Builder() .ignoreValidationErrors(booleanValue(params, IGNORE_VALIDATION_PARAM_NAME)) .dryRun(booleanValue(params, DRY_RUN_PARAM_NAME)) .verbose(booleanValue(params, VERBOSE_PARAM_NAME)) .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout))) .applicationId(createApplicationId(params, tenant)) .vespaVersion(SlimeUtils.optionalString(params.field(VESPA_VERSION_PARAM_NAME)).orElse(null)) .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList())) .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime)) .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null)) .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null)) .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null))) .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime)) .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null)) .force(booleanValue(params, FORCE_PARAM_NAME)) .waitForResourcesInPrepare(booleanValue(params, WAIT_FOR_RESOURCES_IN_PREPARE)) .build(); }
/**
 * Builder for {@link PrepareParams}. All fields are optional; unset fields keep the
 * defaults declared below. The String overloads of the setters accept null (and, for
 * vespaVersion, empty) to mean "absent".
 */
class Builder {

    private boolean ignoreValidationErrors = false;
    private boolean dryRun = false;
    private boolean verbose = false;
    private boolean isBootstrap = false;
    private boolean force = false;
    private boolean waitForResourcesInPrepare = false;
    private ApplicationId applicationId = null;
    // Default deadline for the prepare operation: 60 seconds from now.
    private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
    private Optional<Version> vespaVersion = Optional.empty();
    private List<ContainerEndpoint> containerEndpoints = null;
    private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
    private Optional<DockerImage> dockerImageRepository = Optional.empty();
    private Optional<AthenzDomain> athenzDomain = Optional.empty();
    private Optional<ApplicationRoles> applicationRoles = Optional.empty();
    private Optional<Quota> quota = Optional.empty();
    private List<TenantSecretStore> tenantSecretStores = List.of();

    public Builder() { }

    public Builder applicationId(ApplicationId applicationId) {
        this.applicationId = applicationId;
        return this;
    }

    public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
        this.ignoreValidationErrors = ignoreValidationErrors;
        return this;
    }

    public Builder dryRun(boolean dryRun) {
        this.dryRun = dryRun;
        return this;
    }

    public Builder verbose(boolean verbose) {
        this.verbose = verbose;
        return this;
    }

    public Builder isBootstrap(boolean isBootstrap) {
        this.isBootstrap = isBootstrap;
        return this;
    }

    public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
        this.timeoutBudget = timeoutBudget;
        return this;
    }

    // Accepts null or "" as "no version specified".
    public Builder vespaVersion(String vespaVersion) {
        Optional<Version> version = Optional.empty();
        if (vespaVersion != null && !vespaVersion.isEmpty()) {
            version = Optional.of(Version.fromString(vespaVersion));
        }
        this.vespaVersion = version;
        return this;
    }

    public Builder vespaVersion(Version vespaVersion) {
        this.vespaVersion = Optional.ofNullable(vespaVersion);
        return this;
    }

    // Deserializes a JSON list of container endpoints; null means "no endpoints".
    public Builder containerEndpoints(String serialized) {
        this.containerEndpoints = (serialized == null)
                ? List.of()
                : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
        return this;
    }

    public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
        this.containerEndpoints = endpoints;
        return this;
    }

    public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
        this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
        return this;
    }

    // Deserializes endpoint certificate metadata from JSON; null means "absent".
    public Builder endpointCertificateMetadata(String serialized) {
        this.endpointCertificateMetadata = (serialized == null)
                ? Optional.empty()
                : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
        return this;
    }

    public Builder dockerImageRepository(String dockerImageRepository) {
        this.dockerImageRepository = (dockerImageRepository == null)
                ? Optional.empty()
                : Optional.of(DockerImage.fromString(dockerImageRepository));
        return this;
    }

    public Builder dockerImageRepository(DockerImage dockerImageRepository) {
        this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
        return this;
    }

    public Builder athenzDomain(String athenzDomain) {
        this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
        return this;
    }

    public Builder athenzDomain(AthenzDomain athenzDomain) {
        this.athenzDomain = Optional.ofNullable(athenzDomain);
        return this;
    }

    public Builder applicationRoles(ApplicationRoles applicationRoles) {
        this.applicationRoles = Optional.ofNullable(applicationRoles);
        return this;
    }

    public Builder quota(Quota quota) {
        this.quota = Optional.ofNullable(quota);
        return this;
    }

    // Deserializes a quota from JSON; null means "no quota".
    public Builder quota(String serialized) {
        this.quota = (serialized == null)
                ? Optional.empty()
                : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
        return this;
    }

    // Deserializes a JSON list of tenant secret stores; null means "none".
    public Builder tenantSecretStores(String serialized) {
        List<TenantSecretStore> secretStores = (serialized == null)
                ? List.of()
                : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
        return tenantSecretStores(secretStores);
    }

    public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
        this.tenantSecretStores = tenantSecretStores;
        return this;
    }

    public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
        this.waitForResourcesInPrepare = waitForResourcesInPrepare;
        return this;
    }

    public Builder force(boolean force) {
        this.force = force;
        return this;
    }

    public PrepareParams build() {
        return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun,
                                 verbose, isBootstrap, vespaVersion, containerEndpoints,
                                 endpointCertificateMetadata, dockerImageRepository, athenzDomain,
                                 applicationRoles, quota, tenantSecretStores, force,
                                 waitForResourcesInPrepare);
    }

}
class Builder { private boolean ignoreValidationErrors = false; private boolean dryRun = false; private boolean verbose = false; private boolean isBootstrap = false; private boolean force = false; private boolean waitForResourcesInPrepare = false; private ApplicationId applicationId = null; private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60)); private Optional<Version> vespaVersion = Optional.empty(); private List<ContainerEndpoint> containerEndpoints = null; private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty(); private Optional<DockerImage> dockerImageRepository = Optional.empty(); private Optional<AthenzDomain> athenzDomain = Optional.empty(); private Optional<ApplicationRoles> applicationRoles = Optional.empty(); private Optional<Quota> quota = Optional.empty(); private List<TenantSecretStore> tenantSecretStores = List.of(); public Builder() { } public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; } public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; } public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; } public Builder verbose(boolean verbose) { this.verbose = verbose; return this; } public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; } public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; } public Builder vespaVersion(String vespaVersion) { Optional<Version> version = Optional.empty(); if (vespaVersion != null && !vespaVersion.isEmpty()) { version = Optional.of(Version.fromString(vespaVersion)); } this.vespaVersion = version; return this; } public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; } public Builder containerEndpoints(String serialized) { 
this.containerEndpoints = (serialized == null) ? List.of() : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized)); return this; } public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; } public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; } public Builder endpointCertificateMetadata(String serialized) { this.endpointCertificateMetadata = (serialized == null) ? Optional.empty() : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder dockerImageRepository(String dockerImageRepository) { this.dockerImageRepository = (dockerImageRepository == null) ? Optional.empty() : Optional.of(DockerImage.fromString(dockerImageRepository)); return this; } public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; } public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; } public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; } public Builder applicationRoles(ApplicationRoles applicationRoles) { this.applicationRoles = Optional.ofNullable(applicationRoles); return this; } public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; } public Builder quota(String serialized) { this.quota = (serialized == null) ? Optional.empty() : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder tenantSecretStores(String serialized) { List<TenantSecretStore> secretStores = (serialized == null) ? 
List.of() : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get()); return tenantSecretStores(secretStores); } public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; } public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; } public Builder force(boolean force) { this.force = force; return this; } public PrepareParams build() { return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun, verbose, isBootstrap, vespaVersion, containerEndpoints, endpointCertificateMetadata, dockerImageRepository, athenzDomain, applicationRoles, quota, tenantSecretStores, force, waitForResourcesInPrepare); } }
Is it possible to race with a concurrent removal from `targets.list`, so that `randomizer.nextInt(targets.list.size())` returns an index that is already out of bounds by the time `targets.list.get()` runs?
/**
 * Picks a random distributor seen as up in the cluster state, falling back to a fully
 * random target when too few nodes are known to be good.
 *
 * Works on a private copy of the candidate list: the shared Targets instance may be used
 * by several threads concurrently, and a removal from the shared list between this
 * thread's size() and get() calls could make the random index go out of bounds.
 *
 * @param context the routing context used for tracing and target resolution
 * @return a target spec string, possibly for a totally random node; may be null if
 *         getTargetSpec(null, context) yields none
 */
String getRandomTargetSpec(RoutingContext context) {
    Targets targets = validTargets.get();
    // Snapshot the candidates so concurrent mutation of targets.list cannot race with us.
    List<Integer> candidates = new ArrayList<>(targets.list);
    while ((targets.total != 0) // guards the division below
           && (100 * candidates.size() / targets.total >= requiredUpPercentageToSendToKnownGoodNodes)) {
        Integer distributor = candidates.get(randomizer.nextInt(candidates.size()));
        String targetSpec = getTargetSpec(distributor, context);
        if (targetSpec != null) {
            context.trace(3, "Sending to random node seen up in cluster state");
            return targetSpec;
        }
        candidates.remove(distributor); // removes by value (Integer), not by index
    }
    context.trace(3, "Too few nodes seen up in state. Sending totally random.");
    return getTargetSpec(null, context);
}
Integer distributor = targets.list.get(randomizer.nextInt(targets.list.size()));
// NOTE(review): racy — another thread may remove from the shared targets.list between this
// thread's size() call and its get() call, so the random index can go out of bounds.
// TODO: operate on a local copy of the list (or make removal thread-safe).
String getRandomTargetSpec(RoutingContext context) {
    Targets targets = validTargets.get();
    // total != 0 guards the division; loop while enough known-good nodes remain.
    while ((targets.total != 0) && (100 * targets.list.size() / targets.total >= requiredUpPercentageToSendToKnownGoodNodes)) {
        Integer distributor = targets.list.get(randomizer.nextInt(targets.list.size()));
        String targetSpec = getTargetSpec(distributor, context);
        if (targetSpec != null) {
            context.trace(3, "Sending to random node seen up in cluster state");
            return targetSpec;
        }
        targets.list.remove(distributor); // removes by value (Integer); mutates the shared list
    }
    context.trace(3, "Too few nodes seen up in state. Sending totally random.");
    return getTargetSpec(null, context);
}
// Snapshot of the distributor nodes currently believed to be up.
class Targets {
    // Distributor indexes seen as up. NOTE(review): handed out by reference and
    // mutated by getRandomTargetSpec, possibly from several threads — not safe
    // for concurrent use as-is.
    private final List<Integer> list;
    // Denominator for the up-percentage computation in getRandomTargetSpec.
    private final int total;

    // Empty sentinel: total of 1 (not 0) keeps size/total a well-defined 0,
    // avoiding division by zero before any cluster state is known.
    Targets() {
        this(Collections.emptyList(), 1);
    }

    Targets(List<Integer> list, int total) {
        this.list = list;
        this.total = total;
    }
}
// Snapshot of the distributor nodes currently believed to be up.
class Targets {
    // Distributor indexes seen as up. NOTE(review): handed out by reference and
    // mutated by getRandomTargetSpec, possibly from several threads — not safe
    // for concurrent use as-is.
    private final List<Integer> list;
    // Denominator for the up-percentage computation in getRandomTargetSpec.
    private final int total;

    // Empty sentinel: total of 1 (not 0) keeps size/total a well-defined 0,
    // avoiding division by zero before any cluster state is known.
    Targets() {
        this(Collections.emptyList(), 1);
    }

    Targets(List<Integer> list, int total) {
        this.list = list;
        this.total = total;
    }
}
Yes, it is possible — a concurrent `remove()` can shrink the list between the `size()` read and the `get()` call. I will think about a fix.
// Picks a random distributor believed up and returns its target spec, pruning
// unresolvable candidates and retrying; when too few nodes remain up, falls back
// to a fully random node via getTargetSpec(null, context).
// NOTE(review): targets.list is shared; a concurrent remove() can shrink it
// between size() and get(), so nextInt(size) may index out of bounds — needs a
// local copy or synchronization. TODO confirm and fix.
String getRandomTargetSpec(RoutingContext context) {
    Targets targets = validTargets.get();
    // Loop while at least requiredUpPercentageToSendToKnownGoodNodes percent of
    // all distributors are still candidates.
    while ((targets.total != 0) && (100 * targets.list.size() / targets.total >= requiredUpPercentageToSendToKnownGoodNodes)) {
        Integer distributor = targets.list.get(randomizer.nextInt(targets.list.size()));
        String targetSpec = getTargetSpec(distributor, context);
        if (targetSpec != null) {
            context.trace(3, "Sending to random node seen up in cluster state");
            return targetSpec;
        }
        // remove(Object) overload (distributor is an Integer), removing the value,
        // not an index. This mutates the shared snapshot.
        targets.list.remove(distributor);
    }
    context.trace(3, "Too few nodes seen up in state. Sending totally random.");
    return getTargetSpec(null, context);
}
Integer distributor = targets.list.get(randomizer.nextInt(targets.list.size()));
// Picks a random distributor believed up and returns its target spec, pruning
// unresolvable candidates and retrying; when too few nodes remain up, falls back
// to a fully random node via getTargetSpec(null, context).
// NOTE(review): targets.list is shared; a concurrent remove() can shrink it
// between size() and get(), so nextInt(size) may index out of bounds — needs a
// local copy or synchronization. TODO confirm and fix.
String getRandomTargetSpec(RoutingContext context) {
    Targets targets = validTargets.get();
    // Loop while at least requiredUpPercentageToSendToKnownGoodNodes percent of
    // all distributors are still candidates.
    while ((targets.total != 0) && (100 * targets.list.size() / targets.total >= requiredUpPercentageToSendToKnownGoodNodes)) {
        Integer distributor = targets.list.get(randomizer.nextInt(targets.list.size()));
        String targetSpec = getTargetSpec(distributor, context);
        if (targetSpec != null) {
            context.trace(3, "Sending to random node seen up in cluster state");
            return targetSpec;
        }
        // remove(Object) overload (distributor is an Integer), removing the value,
        // not an index. This mutates the shared snapshot.
        targets.list.remove(distributor);
    }
    context.trace(3, "Too few nodes seen up in state. Sending totally random.");
    return getTargetSpec(null, context);
}
// Snapshot of the distributor nodes currently believed to be up.
class Targets {
    // Distributor indexes seen as up. NOTE(review): handed out by reference and
    // mutated by getRandomTargetSpec, possibly from several threads — not safe
    // for concurrent use as-is.
    private final List<Integer> list;
    // Denominator for the up-percentage computation in getRandomTargetSpec.
    private final int total;

    // Empty sentinel: total of 1 (not 0) keeps size/total a well-defined 0,
    // avoiding division by zero before any cluster state is known.
    Targets() {
        this(Collections.emptyList(), 1);
    }

    Targets(List<Integer> list, int total) {
        this.list = list;
        this.total = total;
    }
}
// Snapshot of the distributor nodes currently believed to be up.
class Targets {
    // Distributor indexes seen as up. NOTE(review): handed out by reference and
    // mutated by getRandomTargetSpec, possibly from several threads — not safe
    // for concurrent use as-is.
    private final List<Integer> list;
    // Denominator for the up-percentage computation in getRandomTargetSpec.
    private final int total;

    // Empty sentinel: total of 1 (not 0) keeps size/total a well-defined 0,
    // avoiding division by zero before any cluster state is known.
    Targets() {
        this(Collections.emptyList(), 1);
    }

    Targets(List<Integer> list, int total) {
        this.list = list;
        this.total = total;
    }
}
Could we just use synchronization instead? Perhaps combined with round-robin selection.
// Resolves a target spec for a random distributor seen up in the cluster state.
// Candidates that fail to resolve are dropped before retrying; once fewer than the
// required percentage of nodes remain, a completely random node is used instead.
String getRandomTargetSpec(RoutingContext context) {
    Targets current = validTargets.get();
    while (current.total != 0
            && 100 * current.list.size() / current.total >= requiredUpPercentageToSendToKnownGoodNodes) {
        int index = randomizer.nextInt(current.list.size());
        Integer candidate = current.list.get(index);
        String spec = getTargetSpec(candidate, context);
        if (spec == null) {
            // Could not resolve this node: discard it (remove-by-value) and retry.
            current.list.remove(candidate);
            continue;
        }
        context.trace(3, "Sending to random node seen up in cluster state");
        return spec;
    }
    context.trace(3, "Too few nodes seen up in state. Sending totally random.");
    return getTargetSpec(null, context);
}
Integer distributor = targets.list.get(randomizer.nextInt(targets.list.size()));
// Picks a random distributor believed up and returns its target spec, pruning
// unresolvable candidates and retrying; when too few nodes remain up, falls back
// to a fully random node via getTargetSpec(null, context).
// NOTE(review): targets.list is shared; a concurrent remove() can shrink it
// between size() and get(), so nextInt(size) may index out of bounds — needs a
// local copy or synchronization. TODO confirm and fix.
String getRandomTargetSpec(RoutingContext context) {
    Targets targets = validTargets.get();
    // Loop while at least requiredUpPercentageToSendToKnownGoodNodes percent of
    // all distributors are still candidates.
    while ((targets.total != 0) && (100 * targets.list.size() / targets.total >= requiredUpPercentageToSendToKnownGoodNodes)) {
        Integer distributor = targets.list.get(randomizer.nextInt(targets.list.size()));
        String targetSpec = getTargetSpec(distributor, context);
        if (targetSpec != null) {
            context.trace(3, "Sending to random node seen up in cluster state");
            return targetSpec;
        }
        // remove(Object) overload (distributor is an Integer), removing the value,
        // not an index. This mutates the shared snapshot.
        targets.list.remove(distributor);
    }
    context.trace(3, "Too few nodes seen up in state. Sending totally random.");
    return getTargetSpec(null, context);
}
// Snapshot of the distributor nodes currently believed to be up.
class Targets {
    // Distributor indexes seen as up. NOTE(review): handed out by reference and
    // mutated by getRandomTargetSpec, possibly from several threads — not safe
    // for concurrent use as-is.
    private final List<Integer> list;
    // Denominator for the up-percentage computation in getRandomTargetSpec.
    private final int total;

    // Empty sentinel: total of 1 (not 0) keeps size/total a well-defined 0,
    // avoiding division by zero before any cluster state is known.
    Targets() {
        this(Collections.emptyList(), 1);
    }

    Targets(List<Integer> list, int total) {
        this.list = list;
        this.total = total;
    }
}
// Snapshot of the distributor nodes currently believed to be up.
class Targets {
    // Distributor indexes seen as up. NOTE(review): handed out by reference and
    // mutated by getRandomTargetSpec, possibly from several threads — not safe
    // for concurrent use as-is.
    private final List<Integer> list;
    // Denominator for the up-percentage computation in getRandomTargetSpec.
    private final int total;

    // Empty sentinel: total of 1 (not 0) keeps size/total a well-defined 0,
    // avoiding division by zero before any cluster state is known.
    Targets() {
        this(Collections.emptyList(), 1);
    }

    Targets(List<Integer> list, int total) {
        this.list = list;
        this.total = total;
    }
}
Can `sslEngine` actually be null here?
/**
 * Associates the given SSL engine with both the socket endpoint and the connection info,
 * so that SSL handshake callbacks (which only receive the engine) can be correlated back
 * to their connection. A null engine is ignored.
 */
private void addReferenceToSslEngine(SocketChannelEndPoint endpoint, ConnectionInfo info, SSLEngine sslEngine) {
    if (sslEngine == null) return;
    // Replacing the endpoint's previous engine also drops that engine's stale info mapping.
    sslEngines.put(endpoint, sslEngine)
            .ifPresent(sslToConnectionInfo::remove);
    sslToConnectionInfo.put(sslEngine, info);
}
if (sslEngine != null) {
// Records the SSLEngine for this endpoint so SslHandshakeListener callbacks
// (which only receive the engine) can be mapped back to the connection info.
// NOTE(review): sslEngine appears to be nullable at least for the ALPN caller —
// confirm; a null engine is silently skipped here.
private void addReferenceToSslEngine(SocketChannelEndPoint endpoint, ConnectionInfo info, SSLEngine sslEngine) {
    if (sslEngine != null) {
        // put() returns the previously registered engine (if any); its stale
        // engine->info mapping is removed before installing the new one.
        sslEngines.put(endpoint, sslEngine)
            .ifPresent(sslToConnectionInfo::remove);
        sslToConnectionInfo.put(sslEngine, info);
    }
}
/**
 * Jetty listener that accumulates per-connection metadata (bytes, request/response counts,
 * TLS details, proxy-protocol info) and emits a ConnectionLogEntry to the connection log
 * when the underlying socket endpoint closes.
 *
 * State lives in identity-keyed maps (SimpleConcurrentIdentityHashMap — presumably
 * concurrent; TODO confirm its semantics) keyed by the physical socket endpoint or the
 * SSL engine; ConnectionInfo synchronizes its own accessors.
 */
class JettyConnectionLogger extends AbstractLifeCycle implements Connection.Listener, HttpChannel.Listener, SslHandshakeListener {

    // Request attribute under which the connection's UUID is exposed to request handling.
    static final String CONNECTION_ID_REQUEST_ATTRIBUTE = "jdisc.request.connection.id";

    private static final Logger log = Logger.getLogger(JettyConnectionLogger.class.getName());

    // Per-socket-endpoint bookkeeping for the lifetime of the connection.
    private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, ConnectionInfo> connectionInfos = new SimpleConcurrentIdentityHashMap<>();
    // Endpoint -> SSL engine, so engine mappings can be cleaned up on close.
    private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, SSLEngine> sslEngines = new SimpleConcurrentIdentityHashMap<>();
    // SSL engine -> connection info: handshake callbacks only receive the engine.
    private final SimpleConcurrentIdentityHashMap<SSLEngine, ConnectionInfo> sslToConnectionInfo = new SimpleConcurrentIdentityHashMap<>();

    private final boolean enabled;
    private final ConnectionLog connectionLog;

    JettyConnectionLogger(ServerConfig.ConnectionLog config, ConnectionLog connectionLog) {
        this.enabled = config.enabled();
        this.connectionLog = connectionLog;
        log.log(Level.FINE, () -> "Jetty connection logger is " + (config.enabled() ? "enabled" : "disabled"));
    }

    // AbstractLifeCycle overrides: only used for trace logging of lifecycle transitions.
    @Override
    protected void doStop() {
        handleListenerInvocation("AbstractLifeCycle", "doStop", "", List.of(), () -> {
            log.log(Level.FINE, () -> "Jetty connection logger is stopped");
        });
    }

    @Override
    protected void doStart() {
        handleListenerInvocation("AbstractLifeCycle", "doStart", "", List.of(), () -> {
            log.log(Level.FINE, () -> "Jetty connection logger is started");
        });
    }

    // Connection.Listener: classify the protocol layer that just opened and record
    // protocol / TLS / proxy-protocol details on the endpoint's ConnectionInfo.
    @Override
    public void onOpened(Connection connection) {
        handleListenerInvocation("Connection.Listener", "onOpened", "%h", List.of(connection), () -> {
            SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint());
            ConnectionInfo info = connectionInfos.computeIfAbsent(endpoint, ConnectionInfo::from);
            String connectionClassName = connection.getClass().getSimpleName();
            if (connection instanceof SslConnection) {
                SSLEngine sslEngine = ((SslConnection) connection).getSSLEngine();
                addReferenceToSslEngine(endpoint, info, sslEngine);
            } else if (connection instanceof ALPNServerConnection) {
                SSLEngine sslEngine = ((ALPNServerConnection) connection).getSSLEngine();
                addReferenceToSslEngine(endpoint, info, sslEngine);
            } else if (connection instanceof HttpConnection) {
                info.setHttpProtocol("HTTP/1.1");
            } else if (connection instanceof HTTP2ServerConnection) {
                info.setHttpProtocol("HTTP/2.0");
            } else if (connectionClassName.endsWith("ProxyProtocolV1Connection")) {
                // Matched by simple class name — presumably because the concrete
                // classes are not accessible; TODO confirm.
                info.setProxyProtocolVersion("v1");
            } else if (connectionClassName.endsWith("ProxyProtocolV2Connection")) {
                info.setProxyProtocolVersion("v2");
            }
            if (connection.getEndPoint() instanceof ProxyConnectionFactory.ProxyEndPoint) {
                // Proxy-protocol endpoints report the original client address.
                InetSocketAddress remoteAddress = connection.getEndPoint().getRemoteAddress();
                info.setRemoteAddress(remoteAddress);
            }
        });
    }

    // Connection.Listener: when the physical socket is no longer open, finalize the
    // entry, write it to the connection log, and drop all bookkeeping for the endpoint.
    @Override
    public void onClosed(Connection connection) {
        handleListenerInvocation("Connection.Listener", "onClosed", "%h", List.of(connection), () -> {
            SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint());
            ConnectionInfo info = connectionInfos.get(endpoint).orElse(null);
            if (info == null) return; // endpoint already logged and removed
            if (connection instanceof HttpConnection) {
                info.setHttpBytes(connection.getBytesIn(), connection.getBytesOut());
            }
            if (!endpoint.isOpen()) { // only log when the socket itself closed, not an upper layer
                info.setClosedAt(System.currentTimeMillis());
                connectionLog.log(info.toLogEntry());
                connectionInfos.remove(endpoint);
                sslEngines.remove(endpoint)
                        .ifPresent(sslToConnectionInfo::remove);
            }
        });
    }

    // HttpChannel.Listener: count the request and tag it with the connection UUID.
    @Override
    public void onRequestBegin(Request request) {
        handleListenerInvocation("HttpChannel.Listener", "onRequestBegin", "%h", List.of(request), () -> {
            SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint());
            // NOTE(review): unchecked Optional.get() — assumes onOpened always ran for this
            // endpoint; a missing entry throws here (caught by handleListenerInvocation).
            ConnectionInfo info = connectionInfos.get(endpoint).get();
            info.incrementRequests();
            request.setAttribute(CONNECTION_ID_REQUEST_ATTRIBUTE, info.uuid());
        });
    }

    @Override
    public void onResponseBegin(Request request) {
        handleListenerInvocation("HttpChannel.Listener", "onResponseBegin", "%h", List.of(request), () -> {
            SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint());
            ConnectionInfo info = connectionInfos.get(endpoint).orElse(null);
            if (info == null) return;
            info.incrementResponses();
        });
    }

    // SslHandshakeListener: correlate the handshake outcome back to the connection via its engine.
    @Override
    public void handshakeSucceeded(Event event) {
        SSLEngine sslEngine = event.getSSLEngine();
        handleListenerInvocation("SslHandshakeListener", "handshakeSucceeded", "sslEngine=%h", List.of(sslEngine), () -> {
            ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null);
            if (info == null) return;
            info.setSslSessionDetails(sslEngine.getSession());
        });
    }

    @Override
    public void handshakeFailed(Event event, Throwable failure) {
        SSLEngine sslEngine = event.getSSLEngine();
        handleListenerInvocation("SslHandshakeListener", "handshakeFailed", "sslEngine=%h,failure=%s", List.of(sslEngine, failure), () -> {
            log.log(Level.FINE, failure, failure::toString);
            ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null);
            if (info == null) return;
            // NOTE(review): assumes every handshake failure is an SSLHandshakeException;
            // a different type would raise ClassCastException (swallowed by the wrapper).
            info.setSslHandshakeFailure((SSLHandshakeException)failure);
        });
    }

    // Wraps every listener callback: no-op when disabled, FINE-level call tracing,
    // and catches all exceptions so listener bugs never break request handling.
    private void handleListenerInvocation(
            String listenerType, String methodName, String methodArgumentsFormat, List<Object> methodArguments, ListenerHandler handler) {
        if (!enabled) return;
        try {
            log.log(Level.FINE, () -> String.format(listenerType + "." + methodName + "(" + methodArgumentsFormat + ")", methodArguments.toArray()));
            handler.run();
        } catch (Exception e) {
            log.log(Level.WARNING, String.format("Exception in %s.%s listener: %s", listenerType, methodName, e.getMessage()), e);
        }
    }

    /**
     * Protocol layers are connected through each {@link Connection}'s {@link EndPoint} reference.
     * This methods iterates through the endpoints recursively to find the underlying socket endpoint.
     */
    private static SocketChannelEndPoint findUnderlyingSocketEndpoint(EndPoint endpoint) {
        if (endpoint instanceof SocketChannelEndPoint) {
            return (SocketChannelEndPoint) endpoint;
        } else if (endpoint instanceof SslConnection.DecryptedEndPoint) {
            var decryptedEndpoint = (SslConnection.DecryptedEndPoint) endpoint;
            return findUnderlyingSocketEndpoint(decryptedEndpoint.getSslConnection().getEndPoint());
        } else if (endpoint instanceof ProxyConnectionFactory.ProxyEndPoint) {
            var proxyEndpoint = (ProxyConnectionFactory.ProxyEndPoint) endpoint;
            return findUnderlyingSocketEndpoint(proxyEndpoint.unwrap());
        } else {
            throw new IllegalArgumentException("Unknown connection endpoint type: " + endpoint.getClass().getName());
        }
    }

    // Callback type allowing checked exceptions; they are handled in handleListenerInvocation.
    @FunctionalInterface
    private interface ListenerHandler {
        void run() throws Exception;
    }

    // Mutable, synchronized accumulator for a single connection's log data.
    private static class ConnectionInfo {
        private final UUID uuid;
        private final long createdAt;
        private final InetSocketAddress localAddress;
        private final InetSocketAddress peerAddress;
        private long closedAt = 0;
        private long httpBytesReceived = 0;
        private long httpBytesSent = 0;
        private long requests = 0;
        private long responses = 0;
        private InetSocketAddress remoteAddress; // original client address behind proxy-protocol
        private byte[] sslSessionId;
        private String sslProtocol;
        private String sslCipherSuite;
        private String sslPeerSubject;
        private Date sslPeerNotBefore;
        private Date sslPeerNotAfter;
        private List<SNIServerName> sslSniServerNames;
        private SSLHandshakeException sslHandshakeException;
        private String proxyProtocolVersion;
        private String httpProtocol;

        private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) {
            this.uuid = uuid;
            this.createdAt = createdAt;
            this.localAddress = localAddress;
            this.peerAddress = peerAddress;
        }

        static ConnectionInfo from(SocketChannelEndPoint endpoint) {
            return new ConnectionInfo(
                    UUID.randomUUID(),
                    endpoint.getCreatedTimeStamp(),
                    endpoint.getLocalAddress(),
                    endpoint.getRemoteAddress());
        }

        synchronized UUID uuid() { return uuid; }

        synchronized ConnectionInfo setClosedAt(long closedAt) {
            this.closedAt = closedAt;
            return this;
        }

        synchronized ConnectionInfo setHttpBytes(long received, long sent) {
            this.httpBytesReceived = received;
            this.httpBytesSent = sent;
            return this;
        }

        synchronized ConnectionInfo incrementRequests() { ++this.requests; return this; }

        synchronized ConnectionInfo incrementResponses() { ++this.responses; return this; }

        synchronized ConnectionInfo setRemoteAddress(InetSocketAddress remoteAddress) {
            this.remoteAddress = remoteAddress;
            return this;
        }

        // Captures cipher/protocol/session id, requested SNI names, and (if the peer
        // authenticated) the client certificate's subject and validity window.
        synchronized ConnectionInfo setSslSessionDetails(SSLSession session) {
            this.sslCipherSuite = session.getCipherSuite();
            this.sslProtocol = session.getProtocol();
            this.sslSessionId = session.getId();
            if (session instanceof ExtendedSSLSession) {
                ExtendedSSLSession extendedSession = (ExtendedSSLSession) session;
                this.sslSniServerNames = extendedSession.getRequestedServerNames();
            }
            try {
                this.sslPeerSubject = session.getPeerPrincipal().getName();
                X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0];
                this.sslPeerNotBefore = peerCertificate.getNotBefore();
                this.sslPeerNotAfter = peerCertificate.getNotAfter();
            } catch (SSLPeerUnverifiedException e) {
                // Peer did not authenticate with a certificate — leave peer fields unset.
            }
            return this;
        }

        synchronized ConnectionInfo setSslHandshakeFailure(SSLHandshakeException exception) {
            this.sslHandshakeException = exception;
            return this;
        }

        synchronized ConnectionInfo setHttpProtocol(String protocol) { this.httpProtocol = protocol; return this; }

        synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; }

        // Builds the immutable log entry; zero/null fields are simply omitted.
        synchronized ConnectionLogEntry toLogEntry() {
            ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt));
            if (closedAt > 0) {
                builder.withDuration((closedAt - createdAt) / 1000D); // millis -> seconds
            }
            if (httpBytesReceived > 0) {
                builder.withHttpBytesReceived(httpBytesReceived);
            }
            if (httpBytesSent > 0) {
                builder.withHttpBytesSent(httpBytesSent);
            }
            if (requests > 0) {
                builder.withRequests(requests);
            }
            if (responses > 0) {
                builder.withResponses(responses);
            }
            if (peerAddress != null) {
                builder.withPeerAddress(peerAddress.getHostString())
                        .withPeerPort(peerAddress.getPort());
            }
            if (localAddress != null) {
                builder.withLocalAddress(localAddress.getHostString())
                        .withLocalPort(localAddress.getPort());
            }
            if (remoteAddress != null) {
                builder.withRemoteAddress(remoteAddress.getHostString())
                        .withRemotePort(remoteAddress.getPort());
            }
            if (sslProtocol != null && sslCipherSuite != null && sslSessionId != null) {
                builder.withSslProtocol(sslProtocol)
                        .withSslCipherSuite(sslCipherSuite)
                        .withSslSessionId(HexDump.toHexString(sslSessionId));
            }
            if (sslSniServerNames != null) {
                sslSniServerNames.stream()
                        .filter(name -> name instanceof SNIHostName && name.getType() == StandardConstants.SNI_HOST_NAME)
                        .map(name -> ((SNIHostName) name).getAsciiName())
                        .findAny()
                        .ifPresent(builder::withSslSniServerName);
            }
            if (sslPeerSubject != null && sslPeerNotAfter != null && sslPeerNotBefore != null) {
                builder.withSslPeerSubject(sslPeerSubject)
                        .withSslPeerNotAfter(sslPeerNotAfter.toInstant())
                        .withSslPeerNotBefore(sslPeerNotBefore.toInstant());
            }
            if (sslHandshakeException != null) {
                // Flatten the cause chain into (class name, message) entries.
                List<ExceptionEntry> exceptionChain = new ArrayList<>();
                Throwable cause = sslHandshakeException;
                while (cause != null) {
                    exceptionChain.add(new ExceptionEntry(cause.getClass().getName(), cause.getMessage()));
                    cause = cause.getCause();
                }
                String type = SslHandshakeFailure.fromSslHandshakeException(sslHandshakeException)
                        .map(SslHandshakeFailure::failureType)
                        .orElse("UNKNOWN");
                builder.withSslHandshakeFailure(new ConnectionLogEntry.SslHandshakeFailure(type, exceptionChain));
            }
            if (httpProtocol != null) {
                builder.withHttpProtocol(httpProtocol);
            }
            if (proxyProtocolVersion != null) {
                builder.withProxyProtocolVersion(proxyProtocolVersion);
            }
            return builder.build();
        }
    }
}
/**
 * Jetty listener that accumulates per-connection metadata (bytes, request/response counts,
 * TLS details, proxy-protocol info) and emits a ConnectionLogEntry to the connection log
 * when the underlying socket endpoint closes.
 *
 * State lives in identity-keyed maps (SimpleConcurrentIdentityHashMap — presumably
 * concurrent; TODO confirm its semantics) keyed by the physical socket endpoint or the
 * SSL engine; ConnectionInfo synchronizes its own accessors.
 */
class JettyConnectionLogger extends AbstractLifeCycle implements Connection.Listener, HttpChannel.Listener, SslHandshakeListener {

    // Request attribute under which the connection's UUID is exposed to request handling.
    static final String CONNECTION_ID_REQUEST_ATTRIBUTE = "jdisc.request.connection.id";

    private static final Logger log = Logger.getLogger(JettyConnectionLogger.class.getName());

    // Per-socket-endpoint bookkeeping for the lifetime of the connection.
    private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, ConnectionInfo> connectionInfos = new SimpleConcurrentIdentityHashMap<>();
    // Endpoint -> SSL engine, so engine mappings can be cleaned up on close.
    private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, SSLEngine> sslEngines = new SimpleConcurrentIdentityHashMap<>();
    // SSL engine -> connection info: handshake callbacks only receive the engine.
    private final SimpleConcurrentIdentityHashMap<SSLEngine, ConnectionInfo> sslToConnectionInfo = new SimpleConcurrentIdentityHashMap<>();

    private final boolean enabled;
    private final ConnectionLog connectionLog;

    JettyConnectionLogger(ServerConfig.ConnectionLog config, ConnectionLog connectionLog) {
        this.enabled = config.enabled();
        this.connectionLog = connectionLog;
        log.log(Level.FINE, () -> "Jetty connection logger is " + (config.enabled() ? "enabled" : "disabled"));
    }

    // AbstractLifeCycle overrides: only used for trace logging of lifecycle transitions.
    @Override
    protected void doStop() {
        handleListenerInvocation("AbstractLifeCycle", "doStop", "", List.of(), () -> {
            log.log(Level.FINE, () -> "Jetty connection logger is stopped");
        });
    }

    @Override
    protected void doStart() {
        handleListenerInvocation("AbstractLifeCycle", "doStart", "", List.of(), () -> {
            log.log(Level.FINE, () -> "Jetty connection logger is started");
        });
    }

    // Connection.Listener: classify the protocol layer that just opened and record
    // protocol / TLS / proxy-protocol details on the endpoint's ConnectionInfo.
    @Override
    public void onOpened(Connection connection) {
        handleListenerInvocation("Connection.Listener", "onOpened", "%h", List.of(connection), () -> {
            SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint());
            ConnectionInfo info = connectionInfos.computeIfAbsent(endpoint, ConnectionInfo::from);
            String connectionClassName = connection.getClass().getSimpleName();
            if (connection instanceof SslConnection) {
                SSLEngine sslEngine = ((SslConnection) connection).getSSLEngine();
                addReferenceToSslEngine(endpoint, info, sslEngine);
            } else if (connection instanceof ALPNServerConnection) {
                SSLEngine sslEngine = ((ALPNServerConnection) connection).getSSLEngine();
                addReferenceToSslEngine(endpoint, info, sslEngine);
            } else if (connection instanceof HttpConnection) {
                info.setHttpProtocol("HTTP/1.1");
            } else if (connection instanceof HTTP2ServerConnection) {
                info.setHttpProtocol("HTTP/2.0");
            } else if (connectionClassName.endsWith("ProxyProtocolV1Connection")) {
                // Matched by simple class name — presumably because the concrete
                // classes are not accessible; TODO confirm.
                info.setProxyProtocolVersion("v1");
            } else if (connectionClassName.endsWith("ProxyProtocolV2Connection")) {
                info.setProxyProtocolVersion("v2");
            }
            if (connection.getEndPoint() instanceof ProxyConnectionFactory.ProxyEndPoint) {
                // Proxy-protocol endpoints report the original client address.
                InetSocketAddress remoteAddress = connection.getEndPoint().getRemoteAddress();
                info.setRemoteAddress(remoteAddress);
            }
        });
    }

    // Connection.Listener: when the physical socket is no longer open, finalize the
    // entry, write it to the connection log, and drop all bookkeeping for the endpoint.
    @Override
    public void onClosed(Connection connection) {
        handleListenerInvocation("Connection.Listener", "onClosed", "%h", List.of(connection), () -> {
            SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint());
            ConnectionInfo info = connectionInfos.get(endpoint).orElse(null);
            if (info == null) return; // endpoint already logged and removed
            if (connection instanceof HttpConnection) {
                info.setHttpBytes(connection.getBytesIn(), connection.getBytesOut());
            }
            if (!endpoint.isOpen()) { // only log when the socket itself closed, not an upper layer
                info.setClosedAt(System.currentTimeMillis());
                connectionLog.log(info.toLogEntry());
                connectionInfos.remove(endpoint);
                sslEngines.remove(endpoint)
                        .ifPresent(sslToConnectionInfo::remove);
            }
        });
    }

    // HttpChannel.Listener: count the request and tag it with the connection UUID.
    @Override
    public void onRequestBegin(Request request) {
        handleListenerInvocation("HttpChannel.Listener", "onRequestBegin", "%h", List.of(request), () -> {
            SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint());
            // NOTE(review): unchecked Optional.get() — assumes onOpened always ran for this
            // endpoint; a missing entry throws here (caught by handleListenerInvocation).
            ConnectionInfo info = connectionInfos.get(endpoint).get();
            info.incrementRequests();
            request.setAttribute(CONNECTION_ID_REQUEST_ATTRIBUTE, info.uuid());
        });
    }

    @Override
    public void onResponseBegin(Request request) {
        handleListenerInvocation("HttpChannel.Listener", "onResponseBegin", "%h", List.of(request), () -> {
            SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint());
            ConnectionInfo info = connectionInfos.get(endpoint).orElse(null);
            if (info == null) return;
            info.incrementResponses();
        });
    }

    // SslHandshakeListener: correlate the handshake outcome back to the connection via its engine.
    @Override
    public void handshakeSucceeded(Event event) {
        SSLEngine sslEngine = event.getSSLEngine();
        handleListenerInvocation("SslHandshakeListener", "handshakeSucceeded", "sslEngine=%h", List.of(sslEngine), () -> {
            ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null);
            if (info == null) return;
            info.setSslSessionDetails(sslEngine.getSession());
        });
    }

    @Override
    public void handshakeFailed(Event event, Throwable failure) {
        SSLEngine sslEngine = event.getSSLEngine();
        handleListenerInvocation("SslHandshakeListener", "handshakeFailed", "sslEngine=%h,failure=%s", List.of(sslEngine, failure), () -> {
            log.log(Level.FINE, failure, failure::toString);
            ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null);
            if (info == null) return;
            // NOTE(review): assumes every handshake failure is an SSLHandshakeException;
            // a different type would raise ClassCastException (swallowed by the wrapper).
            info.setSslHandshakeFailure((SSLHandshakeException)failure);
        });
    }

    // Wraps every listener callback: no-op when disabled, FINE-level call tracing,
    // and catches all exceptions so listener bugs never break request handling.
    private void handleListenerInvocation(
            String listenerType, String methodName, String methodArgumentsFormat, List<Object> methodArguments, ListenerHandler handler) {
        if (!enabled) return;
        try {
            log.log(Level.FINE, () -> String.format(listenerType + "." + methodName + "(" + methodArgumentsFormat + ")", methodArguments.toArray()));
            handler.run();
        } catch (Exception e) {
            log.log(Level.WARNING, String.format("Exception in %s.%s listener: %s", listenerType, methodName, e.getMessage()), e);
        }
    }

    /**
     * Protocol layers are connected through each {@link Connection}'s {@link EndPoint} reference.
     * This methods iterates through the endpoints recursively to find the underlying socket endpoint.
     */
    private static SocketChannelEndPoint findUnderlyingSocketEndpoint(EndPoint endpoint) {
        if (endpoint instanceof SocketChannelEndPoint) {
            return (SocketChannelEndPoint) endpoint;
        } else if (endpoint instanceof SslConnection.DecryptedEndPoint) {
            var decryptedEndpoint = (SslConnection.DecryptedEndPoint) endpoint;
            return findUnderlyingSocketEndpoint(decryptedEndpoint.getSslConnection().getEndPoint());
        } else if (endpoint instanceof ProxyConnectionFactory.ProxyEndPoint) {
            var proxyEndpoint = (ProxyConnectionFactory.ProxyEndPoint) endpoint;
            return findUnderlyingSocketEndpoint(proxyEndpoint.unwrap());
        } else {
            throw new IllegalArgumentException("Unknown connection endpoint type: " + endpoint.getClass().getName());
        }
    }

    // Callback type allowing checked exceptions; they are handled in handleListenerInvocation.
    @FunctionalInterface
    private interface ListenerHandler {
        void run() throws Exception;
    }

    // Mutable, synchronized accumulator for a single connection's log data.
    private static class ConnectionInfo {
        private final UUID uuid;
        private final long createdAt;
        private final InetSocketAddress localAddress;
        private final InetSocketAddress peerAddress;
        private long closedAt = 0;
        private long httpBytesReceived = 0;
        private long httpBytesSent = 0;
        private long requests = 0;
        private long responses = 0;
        private InetSocketAddress remoteAddress; // original client address behind proxy-protocol
        private byte[] sslSessionId;
        private String sslProtocol;
        private String sslCipherSuite;
        private String sslPeerSubject;
        private Date sslPeerNotBefore;
        private Date sslPeerNotAfter;
        private List<SNIServerName> sslSniServerNames;
        private SSLHandshakeException sslHandshakeException;
        private String proxyProtocolVersion;
        private String httpProtocol;

        private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) {
            this.uuid = uuid;
            this.createdAt = createdAt;
            this.localAddress = localAddress;
            this.peerAddress = peerAddress;
        }

        static ConnectionInfo from(SocketChannelEndPoint endpoint) {
            return new ConnectionInfo(
                    UUID.randomUUID(),
                    endpoint.getCreatedTimeStamp(),
                    endpoint.getLocalAddress(),
                    endpoint.getRemoteAddress());
        }

        synchronized UUID uuid() { return uuid; }

        synchronized ConnectionInfo setClosedAt(long closedAt) {
            this.closedAt = closedAt;
            return this;
        }

        synchronized ConnectionInfo setHttpBytes(long received, long sent) {
            this.httpBytesReceived = received;
            this.httpBytesSent = sent;
            return this;
        }

        synchronized ConnectionInfo incrementRequests() { ++this.requests; return this; }

        synchronized ConnectionInfo incrementResponses() { ++this.responses; return this; }

        synchronized ConnectionInfo setRemoteAddress(InetSocketAddress remoteAddress) {
            this.remoteAddress = remoteAddress;
            return this;
        }

        // Captures cipher/protocol/session id, requested SNI names, and (if the peer
        // authenticated) the client certificate's subject and validity window.
        synchronized ConnectionInfo setSslSessionDetails(SSLSession session) {
            this.sslCipherSuite = session.getCipherSuite();
            this.sslProtocol = session.getProtocol();
            this.sslSessionId = session.getId();
            if (session instanceof ExtendedSSLSession) {
                ExtendedSSLSession extendedSession = (ExtendedSSLSession) session;
                this.sslSniServerNames = extendedSession.getRequestedServerNames();
            }
            try {
                this.sslPeerSubject = session.getPeerPrincipal().getName();
                X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0];
                this.sslPeerNotBefore = peerCertificate.getNotBefore();
                this.sslPeerNotAfter = peerCertificate.getNotAfter();
            } catch (SSLPeerUnverifiedException e) {
                // Peer did not authenticate with a certificate — leave peer fields unset.
            }
            return this;
        }

        synchronized ConnectionInfo setSslHandshakeFailure(SSLHandshakeException exception) {
            this.sslHandshakeException = exception;
            return this;
        }

        synchronized ConnectionInfo setHttpProtocol(String protocol) { this.httpProtocol = protocol; return this; }

        synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; }

        // Builds the immutable log entry; zero/null fields are simply omitted.
        synchronized ConnectionLogEntry toLogEntry() {
            ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt));
            if (closedAt > 0) {
                builder.withDuration((closedAt - createdAt) / 1000D); // millis -> seconds
            }
            if (httpBytesReceived > 0) {
                builder.withHttpBytesReceived(httpBytesReceived);
            }
            if (httpBytesSent > 0) {
                builder.withHttpBytesSent(httpBytesSent);
            }
            if (requests > 0) {
                builder.withRequests(requests);
            }
            if (responses > 0) {
                builder.withResponses(responses);
            }
            if (peerAddress != null) {
                builder.withPeerAddress(peerAddress.getHostString())
                        .withPeerPort(peerAddress.getPort());
            }
            if (localAddress != null) {
                builder.withLocalAddress(localAddress.getHostString())
                        .withLocalPort(localAddress.getPort());
            }
            if (remoteAddress != null) {
                builder.withRemoteAddress(remoteAddress.getHostString())
                        .withRemotePort(remoteAddress.getPort());
            }
            if (sslProtocol != null && sslCipherSuite != null && sslSessionId != null) {
                builder.withSslProtocol(sslProtocol)
                        .withSslCipherSuite(sslCipherSuite)
                        .withSslSessionId(HexDump.toHexString(sslSessionId));
            }
            if (sslSniServerNames != null) {
                sslSniServerNames.stream()
                        .filter(name -> name instanceof SNIHostName && name.getType() == StandardConstants.SNI_HOST_NAME)
                        .map(name -> ((SNIHostName) name).getAsciiName())
                        .findAny()
                        .ifPresent(builder::withSslSniServerName);
            }
            if (sslPeerSubject != null && sslPeerNotAfter != null && sslPeerNotBefore != null) {
                builder.withSslPeerSubject(sslPeerSubject)
                        .withSslPeerNotAfter(sslPeerNotAfter.toInstant())
                        .withSslPeerNotBefore(sslPeerNotBefore.toInstant());
            }
            if (sslHandshakeException != null) {
                // Flatten the cause chain into (class name, message) entries.
                List<ExceptionEntry> exceptionChain = new ArrayList<>();
                Throwable cause = sslHandshakeException;
                while (cause != null) {
                    exceptionChain.add(new ExceptionEntry(cause.getClass().getName(), cause.getMessage()));
                    cause = cause.getCause();
                }
                String type = SslHandshakeFailure.fromSslHandshakeException(sslHandshakeException)
                        .map(SslHandshakeFailure::failureType)
                        .orElse("UNKNOWN");
                builder.withSslHandshakeFailure(new ConnectionLogEntry.SslHandshakeFailure(type, exceptionChain));
            }
            if (httpProtocol != null) {
                builder.withHttpProtocol(httpProtocol);
            }
            if (proxyProtocolVersion != null) {
                builder.withProxyProtocolVersion(proxyProtocolVersion);
            }
            return builder.build();
        }
    }
}
It can be null at least for the ALPN connection, but probably not for the SSL connection.
/**
 * Associates the given SSL engine with both the socket endpoint and the connection info,
 * so that SSL handshake callbacks (which only receive the engine) can be correlated back
 * to their connection. A null engine is ignored.
 */
private void addReferenceToSslEngine(SocketChannelEndPoint endpoint, ConnectionInfo info, SSLEngine sslEngine) {
    if (sslEngine == null) return;
    // Replacing the endpoint's previous engine also drops that engine's stale info mapping.
    sslEngines.put(endpoint, sslEngine)
            .ifPresent(sslToConnectionInfo::remove);
    sslToConnectionInfo.put(sslEngine, info);
}
if (sslEngine != null) {
// Records the SSLEngine for this endpoint so SslHandshakeListener callbacks
// (which only receive the engine) can be mapped back to the connection info.
// NOTE(review): sslEngine appears to be nullable at least for the ALPN caller —
// confirm; a null engine is silently skipped here.
private void addReferenceToSslEngine(SocketChannelEndPoint endpoint, ConnectionInfo info, SSLEngine sslEngine) {
    if (sslEngine != null) {
        // put() returns the previously registered engine (if any); its stale
        // engine->info mapping is removed before installing the new one.
        sslEngines.put(endpoint, sslEngine)
            .ifPresent(sslToConnectionInfo::remove);
        sslToConnectionInfo.put(sslEngine, info);
    }
}
class JettyConnectionLogger extends AbstractLifeCycle implements Connection.Listener, HttpChannel.Listener, SslHandshakeListener { static final String CONNECTION_ID_REQUEST_ATTRIBUTE = "jdisc.request.connection.id"; private static final Logger log = Logger.getLogger(JettyConnectionLogger.class.getName()); private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, ConnectionInfo> connectionInfos = new SimpleConcurrentIdentityHashMap<>(); private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, SSLEngine> sslEngines = new SimpleConcurrentIdentityHashMap<>(); private final SimpleConcurrentIdentityHashMap<SSLEngine, ConnectionInfo> sslToConnectionInfo = new SimpleConcurrentIdentityHashMap<>(); private final boolean enabled; private final ConnectionLog connectionLog; JettyConnectionLogger(ServerConfig.ConnectionLog config, ConnectionLog connectionLog) { this.enabled = config.enabled(); this.connectionLog = connectionLog; log.log(Level.FINE, () -> "Jetty connection logger is " + (config.enabled() ? 
"enabled" : "disabled")); } @Override protected void doStop() { handleListenerInvocation("AbstractLifeCycle", "doStop", "", List.of(), () -> { log.log(Level.FINE, () -> "Jetty connection logger is stopped"); }); } @Override protected void doStart() { handleListenerInvocation("AbstractLifeCycle", "doStart", "", List.of(), () -> { log.log(Level.FINE, () -> "Jetty connection logger is started"); }); } @Override public void onOpened(Connection connection) { handleListenerInvocation("Connection.Listener", "onOpened", "%h", List.of(connection), () -> { SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint()); ConnectionInfo info = connectionInfos.computeIfAbsent(endpoint, ConnectionInfo::from); String connectionClassName = connection.getClass().getSimpleName(); if (connection instanceof SslConnection) { SSLEngine sslEngine = ((SslConnection) connection).getSSLEngine(); addReferenceToSslEngine(endpoint, info, sslEngine); } else if (connection instanceof ALPNServerConnection) { SSLEngine sslEngine = ((ALPNServerConnection) connection).getSSLEngine(); addReferenceToSslEngine(endpoint, info, sslEngine); } else if (connection instanceof HttpConnection) { info.setHttpProtocol("HTTP/1.1"); } else if (connection instanceof HTTP2ServerConnection) { info.setHttpProtocol("HTTP/2.0"); } else if (connectionClassName.endsWith("ProxyProtocolV1Connection")) { info.setProxyProtocolVersion("v1"); } else if (connectionClassName.endsWith("ProxyProtocolV2Connection")) { info.setProxyProtocolVersion("v2"); } if (connection.getEndPoint() instanceof ProxyConnectionFactory.ProxyEndPoint) { InetSocketAddress remoteAddress = connection.getEndPoint().getRemoteAddress(); info.setRemoteAddress(remoteAddress); } }); } @Override public void onClosed(Connection connection) { handleListenerInvocation("Connection.Listener", "onClosed", "%h", List.of(connection), () -> { SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint()); ConnectionInfo 
info = connectionInfos.get(endpoint).orElse(null); if (info == null) return; if (connection instanceof HttpConnection) { info.setHttpBytes(connection.getBytesIn(), connection.getBytesOut()); } if (!endpoint.isOpen()) { info.setClosedAt(System.currentTimeMillis()); connectionLog.log(info.toLogEntry()); connectionInfos.remove(endpoint); sslEngines.remove(endpoint) .ifPresent(sslToConnectionInfo::remove); } }); } @Override public void onRequestBegin(Request request) { handleListenerInvocation("HttpChannel.Listener", "onRequestBegin", "%h", List.of(request), () -> { SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint()); ConnectionInfo info = connectionInfos.get(endpoint).get(); info.incrementRequests(); request.setAttribute(CONNECTION_ID_REQUEST_ATTRIBUTE, info.uuid()); }); } @Override public void onResponseBegin(Request request) { handleListenerInvocation("HttpChannel.Listener", "onResponseBegin", "%h", List.of(request), () -> { SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint()); ConnectionInfo info = connectionInfos.get(endpoint).orElse(null); if (info == null) return; info.incrementResponses(); }); } @Override public void handshakeSucceeded(Event event) { SSLEngine sslEngine = event.getSSLEngine(); handleListenerInvocation("SslHandshakeListener", "handshakeSucceeded", "sslEngine=%h", List.of(sslEngine), () -> { ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null); if (info == null) return; info.setSslSessionDetails(sslEngine.getSession()); }); } @Override public void handshakeFailed(Event event, Throwable failure) { SSLEngine sslEngine = event.getSSLEngine(); handleListenerInvocation("SslHandshakeListener", "handshakeFailed", "sslEngine=%h,failure=%s", List.of(sslEngine, failure), () -> { log.log(Level.FINE, failure, failure::toString); ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null); if (info == null) return; 
info.setSslHandshakeFailure((SSLHandshakeException)failure); }); } private void handleListenerInvocation( String listenerType, String methodName, String methodArgumentsFormat, List<Object> methodArguments, ListenerHandler handler) { if (!enabled) return; try { log.log(Level.FINE, () -> String.format(listenerType + "." + methodName + "(" + methodArgumentsFormat + ")", methodArguments.toArray())); handler.run(); } catch (Exception e) { log.log(Level.WARNING, String.format("Exception in %s.%s listener: %s", listenerType, methodName, e.getMessage()), e); } } /** * Protocol layers are connected through each {@link Connection}'s {@link EndPoint} reference. * This methods iterates through the endpoints recursively to find the underlying socket endpoint. */ private static SocketChannelEndPoint findUnderlyingSocketEndpoint(EndPoint endpoint) { if (endpoint instanceof SocketChannelEndPoint) { return (SocketChannelEndPoint) endpoint; } else if (endpoint instanceof SslConnection.DecryptedEndPoint) { var decryptedEndpoint = (SslConnection.DecryptedEndPoint) endpoint; return findUnderlyingSocketEndpoint(decryptedEndpoint.getSslConnection().getEndPoint()); } else if (endpoint instanceof ProxyConnectionFactory.ProxyEndPoint) { var proxyEndpoint = (ProxyConnectionFactory.ProxyEndPoint) endpoint; return findUnderlyingSocketEndpoint(proxyEndpoint.unwrap()); } else { throw new IllegalArgumentException("Unknown connection endpoint type: " + endpoint.getClass().getName()); } } @FunctionalInterface private interface ListenerHandler { void run() throws Exception; } private static class ConnectionInfo { private final UUID uuid; private final long createdAt; private final InetSocketAddress localAddress; private final InetSocketAddress peerAddress; private long closedAt = 0; private long httpBytesReceived = 0; private long httpBytesSent = 0; private long requests = 0; private long responses = 0; private InetSocketAddress remoteAddress; private byte[] sslSessionId; private String sslProtocol; 
private String sslCipherSuite; private String sslPeerSubject; private Date sslPeerNotBefore; private Date sslPeerNotAfter; private List<SNIServerName> sslSniServerNames; private SSLHandshakeException sslHandshakeException; private String proxyProtocolVersion; private String httpProtocol; private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) { this.uuid = uuid; this.createdAt = createdAt; this.localAddress = localAddress; this.peerAddress = peerAddress; } static ConnectionInfo from(SocketChannelEndPoint endpoint) { return new ConnectionInfo( UUID.randomUUID(), endpoint.getCreatedTimeStamp(), endpoint.getLocalAddress(), endpoint.getRemoteAddress()); } synchronized UUID uuid() { return uuid; } synchronized ConnectionInfo setClosedAt(long closedAt) { this.closedAt = closedAt; return this; } synchronized ConnectionInfo setHttpBytes(long received, long sent) { this.httpBytesReceived = received; this.httpBytesSent = sent; return this; } synchronized ConnectionInfo incrementRequests() { ++this.requests; return this; } synchronized ConnectionInfo incrementResponses() { ++this.responses; return this; } synchronized ConnectionInfo setRemoteAddress(InetSocketAddress remoteAddress) { this.remoteAddress = remoteAddress; return this; } synchronized ConnectionInfo setSslSessionDetails(SSLSession session) { this.sslCipherSuite = session.getCipherSuite(); this.sslProtocol = session.getProtocol(); this.sslSessionId = session.getId(); if (session instanceof ExtendedSSLSession) { ExtendedSSLSession extendedSession = (ExtendedSSLSession) session; this.sslSniServerNames = extendedSession.getRequestedServerNames(); } try { this.sslPeerSubject = session.getPeerPrincipal().getName(); X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0]; this.sslPeerNotBefore = peerCertificate.getNotBefore(); this.sslPeerNotAfter = peerCertificate.getNotAfter(); } catch (SSLPeerUnverifiedException e) { } return this; } 
synchronized ConnectionInfo setSslHandshakeFailure(SSLHandshakeException exception) { this.sslHandshakeException = exception; return this; } synchronized ConnectionInfo setHttpProtocol(String protocol) { this.httpProtocol = protocol; return this; } synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; } synchronized ConnectionLogEntry toLogEntry() { ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt)); if (closedAt > 0) { builder.withDuration((closedAt - createdAt) / 1000D); } if (httpBytesReceived > 0) { builder.withHttpBytesReceived(httpBytesReceived); } if (httpBytesSent > 0) { builder.withHttpBytesSent(httpBytesSent); } if (requests > 0) { builder.withRequests(requests); } if (responses > 0) { builder.withResponses(responses); } if (peerAddress != null) { builder.withPeerAddress(peerAddress.getHostString()) .withPeerPort(peerAddress.getPort()); } if (localAddress != null) { builder.withLocalAddress(localAddress.getHostString()) .withLocalPort(localAddress.getPort()); } if (remoteAddress != null) { builder.withRemoteAddress(remoteAddress.getHostString()) .withRemotePort(remoteAddress.getPort()); } if (sslProtocol != null && sslCipherSuite != null && sslSessionId != null) { builder.withSslProtocol(sslProtocol) .withSslCipherSuite(sslCipherSuite) .withSslSessionId(HexDump.toHexString(sslSessionId)); } if (sslSniServerNames != null) { sslSniServerNames.stream() .filter(name -> name instanceof SNIHostName && name.getType() == StandardConstants.SNI_HOST_NAME) .map(name -> ((SNIHostName) name).getAsciiName()) .findAny() .ifPresent(builder::withSslSniServerName); } if (sslPeerSubject != null && sslPeerNotAfter != null && sslPeerNotBefore != null) { builder.withSslPeerSubject(sslPeerSubject) .withSslPeerNotAfter(sslPeerNotAfter.toInstant()) .withSslPeerNotBefore(sslPeerNotBefore.toInstant()); } if (sslHandshakeException != null) { List<ExceptionEntry> 
exceptionChain = new ArrayList<>(); Throwable cause = sslHandshakeException; while (cause != null) { exceptionChain.add(new ExceptionEntry(cause.getClass().getName(), cause.getMessage())); cause = cause.getCause(); } String type = SslHandshakeFailure.fromSslHandshakeException(sslHandshakeException) .map(SslHandshakeFailure::failureType) .orElse("UNKNOWN"); builder.withSslHandshakeFailure(new ConnectionLogEntry.SslHandshakeFailure(type, exceptionChain)); } if (httpProtocol != null) { builder.withHttpProtocol(httpProtocol); } if (proxyProtocolVersion != null) { builder.withProxyProtocolVersion(proxyProtocolVersion); } return builder.build(); } } }
class JettyConnectionLogger extends AbstractLifeCycle implements Connection.Listener, HttpChannel.Listener, SslHandshakeListener { static final String CONNECTION_ID_REQUEST_ATTRIBUTE = "jdisc.request.connection.id"; private static final Logger log = Logger.getLogger(JettyConnectionLogger.class.getName()); private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, ConnectionInfo> connectionInfos = new SimpleConcurrentIdentityHashMap<>(); private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, SSLEngine> sslEngines = new SimpleConcurrentIdentityHashMap<>(); private final SimpleConcurrentIdentityHashMap<SSLEngine, ConnectionInfo> sslToConnectionInfo = new SimpleConcurrentIdentityHashMap<>(); private final boolean enabled; private final ConnectionLog connectionLog; JettyConnectionLogger(ServerConfig.ConnectionLog config, ConnectionLog connectionLog) { this.enabled = config.enabled(); this.connectionLog = connectionLog; log.log(Level.FINE, () -> "Jetty connection logger is " + (config.enabled() ? 
"enabled" : "disabled")); } @Override protected void doStop() { handleListenerInvocation("AbstractLifeCycle", "doStop", "", List.of(), () -> { log.log(Level.FINE, () -> "Jetty connection logger is stopped"); }); } @Override protected void doStart() { handleListenerInvocation("AbstractLifeCycle", "doStart", "", List.of(), () -> { log.log(Level.FINE, () -> "Jetty connection logger is started"); }); } @Override public void onOpened(Connection connection) { handleListenerInvocation("Connection.Listener", "onOpened", "%h", List.of(connection), () -> { SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint()); ConnectionInfo info = connectionInfos.computeIfAbsent(endpoint, ConnectionInfo::from); String connectionClassName = connection.getClass().getSimpleName(); if (connection instanceof SslConnection) { SSLEngine sslEngine = ((SslConnection) connection).getSSLEngine(); addReferenceToSslEngine(endpoint, info, sslEngine); } else if (connection instanceof ALPNServerConnection) { SSLEngine sslEngine = ((ALPNServerConnection) connection).getSSLEngine(); addReferenceToSslEngine(endpoint, info, sslEngine); } else if (connection instanceof HttpConnection) { info.setHttpProtocol("HTTP/1.1"); } else if (connection instanceof HTTP2ServerConnection) { info.setHttpProtocol("HTTP/2.0"); } else if (connectionClassName.endsWith("ProxyProtocolV1Connection")) { info.setProxyProtocolVersion("v1"); } else if (connectionClassName.endsWith("ProxyProtocolV2Connection")) { info.setProxyProtocolVersion("v2"); } if (connection.getEndPoint() instanceof ProxyConnectionFactory.ProxyEndPoint) { InetSocketAddress remoteAddress = connection.getEndPoint().getRemoteAddress(); info.setRemoteAddress(remoteAddress); } }); } @Override public void onClosed(Connection connection) { handleListenerInvocation("Connection.Listener", "onClosed", "%h", List.of(connection), () -> { SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint()); ConnectionInfo 
info = connectionInfos.get(endpoint).orElse(null); if (info == null) return; if (connection instanceof HttpConnection) { info.setHttpBytes(connection.getBytesIn(), connection.getBytesOut()); } if (!endpoint.isOpen()) { info.setClosedAt(System.currentTimeMillis()); connectionLog.log(info.toLogEntry()); connectionInfos.remove(endpoint); sslEngines.remove(endpoint) .ifPresent(sslToConnectionInfo::remove); } }); } @Override public void onRequestBegin(Request request) { handleListenerInvocation("HttpChannel.Listener", "onRequestBegin", "%h", List.of(request), () -> { SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint()); ConnectionInfo info = connectionInfos.get(endpoint).get(); info.incrementRequests(); request.setAttribute(CONNECTION_ID_REQUEST_ATTRIBUTE, info.uuid()); }); } @Override public void onResponseBegin(Request request) { handleListenerInvocation("HttpChannel.Listener", "onResponseBegin", "%h", List.of(request), () -> { SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint()); ConnectionInfo info = connectionInfos.get(endpoint).orElse(null); if (info == null) return; info.incrementResponses(); }); } @Override public void handshakeSucceeded(Event event) { SSLEngine sslEngine = event.getSSLEngine(); handleListenerInvocation("SslHandshakeListener", "handshakeSucceeded", "sslEngine=%h", List.of(sslEngine), () -> { ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null); if (info == null) return; info.setSslSessionDetails(sslEngine.getSession()); }); } @Override public void handshakeFailed(Event event, Throwable failure) { SSLEngine sslEngine = event.getSSLEngine(); handleListenerInvocation("SslHandshakeListener", "handshakeFailed", "sslEngine=%h,failure=%s", List.of(sslEngine, failure), () -> { log.log(Level.FINE, failure, failure::toString); ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null); if (info == null) return; 
info.setSslHandshakeFailure((SSLHandshakeException)failure); }); } private void handleListenerInvocation( String listenerType, String methodName, String methodArgumentsFormat, List<Object> methodArguments, ListenerHandler handler) { if (!enabled) return; try { log.log(Level.FINE, () -> String.format(listenerType + "." + methodName + "(" + methodArgumentsFormat + ")", methodArguments.toArray())); handler.run(); } catch (Exception e) { log.log(Level.WARNING, String.format("Exception in %s.%s listener: %s", listenerType, methodName, e.getMessage()), e); } } /** * Protocol layers are connected through each {@link Connection}'s {@link EndPoint} reference. * This methods iterates through the endpoints recursively to find the underlying socket endpoint. */ private static SocketChannelEndPoint findUnderlyingSocketEndpoint(EndPoint endpoint) { if (endpoint instanceof SocketChannelEndPoint) { return (SocketChannelEndPoint) endpoint; } else if (endpoint instanceof SslConnection.DecryptedEndPoint) { var decryptedEndpoint = (SslConnection.DecryptedEndPoint) endpoint; return findUnderlyingSocketEndpoint(decryptedEndpoint.getSslConnection().getEndPoint()); } else if (endpoint instanceof ProxyConnectionFactory.ProxyEndPoint) { var proxyEndpoint = (ProxyConnectionFactory.ProxyEndPoint) endpoint; return findUnderlyingSocketEndpoint(proxyEndpoint.unwrap()); } else { throw new IllegalArgumentException("Unknown connection endpoint type: " + endpoint.getClass().getName()); } } @FunctionalInterface private interface ListenerHandler { void run() throws Exception; } private static class ConnectionInfo { private final UUID uuid; private final long createdAt; private final InetSocketAddress localAddress; private final InetSocketAddress peerAddress; private long closedAt = 0; private long httpBytesReceived = 0; private long httpBytesSent = 0; private long requests = 0; private long responses = 0; private InetSocketAddress remoteAddress; private byte[] sslSessionId; private String sslProtocol; 
private String sslCipherSuite; private String sslPeerSubject; private Date sslPeerNotBefore; private Date sslPeerNotAfter; private List<SNIServerName> sslSniServerNames; private SSLHandshakeException sslHandshakeException; private String proxyProtocolVersion; private String httpProtocol; private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) { this.uuid = uuid; this.createdAt = createdAt; this.localAddress = localAddress; this.peerAddress = peerAddress; } static ConnectionInfo from(SocketChannelEndPoint endpoint) { return new ConnectionInfo( UUID.randomUUID(), endpoint.getCreatedTimeStamp(), endpoint.getLocalAddress(), endpoint.getRemoteAddress()); } synchronized UUID uuid() { return uuid; } synchronized ConnectionInfo setClosedAt(long closedAt) { this.closedAt = closedAt; return this; } synchronized ConnectionInfo setHttpBytes(long received, long sent) { this.httpBytesReceived = received; this.httpBytesSent = sent; return this; } synchronized ConnectionInfo incrementRequests() { ++this.requests; return this; } synchronized ConnectionInfo incrementResponses() { ++this.responses; return this; } synchronized ConnectionInfo setRemoteAddress(InetSocketAddress remoteAddress) { this.remoteAddress = remoteAddress; return this; } synchronized ConnectionInfo setSslSessionDetails(SSLSession session) { this.sslCipherSuite = session.getCipherSuite(); this.sslProtocol = session.getProtocol(); this.sslSessionId = session.getId(); if (session instanceof ExtendedSSLSession) { ExtendedSSLSession extendedSession = (ExtendedSSLSession) session; this.sslSniServerNames = extendedSession.getRequestedServerNames(); } try { this.sslPeerSubject = session.getPeerPrincipal().getName(); X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0]; this.sslPeerNotBefore = peerCertificate.getNotBefore(); this.sslPeerNotAfter = peerCertificate.getNotAfter(); } catch (SSLPeerUnverifiedException e) { } return this; } 
synchronized ConnectionInfo setSslHandshakeFailure(SSLHandshakeException exception) { this.sslHandshakeException = exception; return this; } synchronized ConnectionInfo setHttpProtocol(String protocol) { this.httpProtocol = protocol; return this; } synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; } synchronized ConnectionLogEntry toLogEntry() { ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt)); if (closedAt > 0) { builder.withDuration((closedAt - createdAt) / 1000D); } if (httpBytesReceived > 0) { builder.withHttpBytesReceived(httpBytesReceived); } if (httpBytesSent > 0) { builder.withHttpBytesSent(httpBytesSent); } if (requests > 0) { builder.withRequests(requests); } if (responses > 0) { builder.withResponses(responses); } if (peerAddress != null) { builder.withPeerAddress(peerAddress.getHostString()) .withPeerPort(peerAddress.getPort()); } if (localAddress != null) { builder.withLocalAddress(localAddress.getHostString()) .withLocalPort(localAddress.getPort()); } if (remoteAddress != null) { builder.withRemoteAddress(remoteAddress.getHostString()) .withRemotePort(remoteAddress.getPort()); } if (sslProtocol != null && sslCipherSuite != null && sslSessionId != null) { builder.withSslProtocol(sslProtocol) .withSslCipherSuite(sslCipherSuite) .withSslSessionId(HexDump.toHexString(sslSessionId)); } if (sslSniServerNames != null) { sslSniServerNames.stream() .filter(name -> name instanceof SNIHostName && name.getType() == StandardConstants.SNI_HOST_NAME) .map(name -> ((SNIHostName) name).getAsciiName()) .findAny() .ifPresent(builder::withSslSniServerName); } if (sslPeerSubject != null && sslPeerNotAfter != null && sslPeerNotBefore != null) { builder.withSslPeerSubject(sslPeerSubject) .withSslPeerNotAfter(sslPeerNotAfter.toInstant()) .withSslPeerNotBefore(sslPeerNotBefore.toInstant()); } if (sslHandshakeException != null) { List<ExceptionEntry> 
exceptionChain = new ArrayList<>(); Throwable cause = sslHandshakeException; while (cause != null) { exceptionChain.add(new ExceptionEntry(cause.getClass().getName(), cause.getMessage())); cause = cause.getCause(); } String type = SslHandshakeFailure.fromSslHandshakeException(sslHandshakeException) .map(SslHandshakeFailure::failureType) .orElse("UNKNOWN"); builder.withSslHandshakeFailure(new ConnectionLogEntry.SslHandshakeFailure(type, exceptionChain)); } if (httpProtocol != null) { builder.withHttpProtocol(httpProtocol); } if (proxyProtocolVersion != null) { builder.withProxyProtocolVersion(proxyProtocolVersion); } return builder.build(); } } }
Yes — all the boolean fields look OK to me, but I didn't check the fields that go through separate serializers.
/**
 * Builds PrepareParams from a JSON document (as sent in multipart deploy requests).
 * Boolean fields are read with {@code asBool()}, the vespa version with {@code asString()},
 * and optional string fields via {@code SlimeUtils.optionalString} (null when absent).
 *
 * @param json           JSON document with the prepare parameters
 * @param tenant         tenant owning the application
 * @param barrierTimeout ZooKeeper barrier timeout used when none is given in the JSON
 */
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) {
    Inspector params = SlimeUtils.jsonToSlime(json).get();
    Builder builder = new Builder();
    builder.ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool());
    builder.dryRun(params.field(DRY_RUN_PARAM_NAME).asBool());
    builder.verbose(params.field(VERBOSE_PARAM_NAME).asBool());
    builder.timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout)));
    builder.applicationId(createApplicationId(params, tenant));
    builder.vespaVersion(params.field(VESPA_VERSION_PARAM_NAME).asString());
    builder.containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME),
                                              ContainerEndpointSerializer::endpointListFromSlime,
                                              Collections.emptyList()));
    builder.endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME),
                                                    EndpointCertificateMetadataSerializer::fromSlime));
    builder.dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null));
    builder.athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null));
    builder.applicationRoles(ApplicationRoles.fromString(
            SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null),
            SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null)));
    builder.quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime));
    builder.tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null));
    builder.force(params.field(FORCE_PARAM_NAME).asBool());
    builder.waitForResourcesInPrepare(params.field(WAIT_FOR_RESOURCES_IN_PREPARE).asBool());
    return builder.build();
}
return new Builder().ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool())
/**
 * Builds PrepareParams from a JSON document, validating input as it is read: parsing goes
 * through {@code jsonToSlimeOrThrow} and boolean fields through the {@code booleanValue}
 * helper (both defined elsewhere in this class — presumably rejecting malformed values).
 *
 * @param json           JSON document with the prepare parameters
 * @param tenant         tenant owning the application
 * @param barrierTimeout ZooKeeper barrier timeout used when none is given in the JSON
 */
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) {
    Inspector params = SlimeUtils.jsonToSlimeOrThrow(json).get();
    Builder builder = new Builder();
    builder.ignoreValidationErrors(booleanValue(params, IGNORE_VALIDATION_PARAM_NAME));
    builder.dryRun(booleanValue(params, DRY_RUN_PARAM_NAME));
    builder.verbose(booleanValue(params, VERBOSE_PARAM_NAME));
    builder.timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout)));
    builder.applicationId(createApplicationId(params, tenant));
    builder.vespaVersion(SlimeUtils.optionalString(params.field(VESPA_VERSION_PARAM_NAME)).orElse(null));
    builder.containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME),
                                              ContainerEndpointSerializer::endpointListFromSlime,
                                              Collections.emptyList()));
    builder.endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME),
                                                    EndpointCertificateMetadataSerializer::fromSlime));
    builder.dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null));
    builder.athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null));
    builder.applicationRoles(ApplicationRoles.fromString(
            SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null),
            SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null)));
    builder.quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime));
    builder.tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null));
    builder.force(booleanValue(params, FORCE_PARAM_NAME));
    builder.waitForResourcesInPrepare(booleanValue(params, WAIT_FOR_RESOURCES_IN_PREPARE));
    return builder.build();
}
/**
 * Builder for {@code PrepareParams}. Every field is optional; unset fields keep the defaults
 * declared below. String-typed overloads accept the serialized (HTTP) form, converting to the
 * typed representation, and treat null as "not given".
 */
class Builder {

    // All boolean flags default to false; callers opt in explicitly.
    private boolean ignoreValidationErrors = false;
    private boolean dryRun = false;
    private boolean verbose = false;
    private boolean isBootstrap = false;
    private boolean force = false;
    private boolean waitForResourcesInPrepare = false;
    private ApplicationId applicationId = null;
    // Default prepare timeout: 60 seconds from now.
    private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
    private Optional<Version> vespaVersion = Optional.empty();
    // null here means "never set"; the String setter maps null input to an empty list instead.
    private List<ContainerEndpoint> containerEndpoints = null;
    private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
    private Optional<DockerImage> dockerImageRepository = Optional.empty();
    private Optional<AthenzDomain> athenzDomain = Optional.empty();
    private Optional<ApplicationRoles> applicationRoles = Optional.empty();
    private Optional<Quota> quota = Optional.empty();
    private List<TenantSecretStore> tenantSecretStores = List.of();

    public Builder() { }

    public Builder applicationId(ApplicationId applicationId) {
        this.applicationId = applicationId;
        return this;
    }

    public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
        this.ignoreValidationErrors = ignoreValidationErrors;
        return this;
    }

    public Builder dryRun(boolean dryRun) {
        this.dryRun = dryRun;
        return this;
    }

    public Builder verbose(boolean verbose) {
        this.verbose = verbose;
        return this;
    }

    public Builder isBootstrap(boolean isBootstrap) {
        this.isBootstrap = isBootstrap;
        return this;
    }

    public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
        this.timeoutBudget = timeoutBudget;
        return this;
    }

    /** Accepts the version as a string; null or empty means "unspecified". */
    public Builder vespaVersion(String vespaVersion) {
        Optional<Version> version = Optional.empty();
        if (vespaVersion != null && !vespaVersion.isEmpty()) {
            version = Optional.of(Version.fromString(vespaVersion));
        }
        this.vespaVersion = version;
        return this;
    }

    public Builder vespaVersion(Version vespaVersion) {
        this.vespaVersion = Optional.ofNullable(vespaVersion);
        return this;
    }

    /** Deserializes a JSON list of container endpoints; null input yields an empty list. */
    public Builder containerEndpoints(String serialized) {
        this.containerEndpoints = (serialized == null)
                ? List.of()
                : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
        return this;
    }

    public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
        this.containerEndpoints = endpoints;
        return this;
    }

    public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
        this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
        return this;
    }

    /** Deserializes endpoint certificate metadata from JSON; null input leaves it empty. */
    public Builder endpointCertificateMetadata(String serialized) {
        this.endpointCertificateMetadata = (serialized == null)
                ? Optional.empty()
                : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
        return this;
    }

    public Builder dockerImageRepository(String dockerImageRepository) {
        this.dockerImageRepository = (dockerImageRepository == null)
                ? Optional.empty()
                : Optional.of(DockerImage.fromString(dockerImageRepository));
        return this;
    }

    public Builder dockerImageRepository(DockerImage dockerImageRepository) {
        this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
        return this;
    }

    public Builder athenzDomain(String athenzDomain) {
        this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
        return this;
    }

    public Builder athenzDomain(AthenzDomain athenzDomain) {
        this.athenzDomain = Optional.ofNullable(athenzDomain);
        return this;
    }

    public Builder applicationRoles(ApplicationRoles applicationRoles) {
        this.applicationRoles = Optional.ofNullable(applicationRoles);
        return this;
    }

    public Builder quota(Quota quota) {
        this.quota = Optional.ofNullable(quota);
        return this;
    }

    /** Deserializes a quota from JSON; null input leaves it empty. */
    public Builder quota(String serialized) {
        this.quota = (serialized == null)
                ? Optional.empty()
                : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
        return this;
    }

    /** Deserializes a JSON list of tenant secret stores; null input yields an empty list. */
    public Builder tenantSecretStores(String serialized) {
        List<TenantSecretStore> secretStores = (serialized == null)
                ? List.of()
                : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
        return tenantSecretStores(secretStores);
    }

    public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
        this.tenantSecretStores = tenantSecretStores;
        return this;
    }

    public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
        this.waitForResourcesInPrepare = waitForResourcesInPrepare;
        return this;
    }

    public Builder force(boolean force) {
        this.force = force;
        return this;
    }

    /** Creates the immutable PrepareParams from the accumulated state. */
    public PrepareParams build() {
        return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun,
                                 verbose, isBootstrap, vespaVersion, containerEndpoints,
                                 endpointCertificateMetadata, dockerImageRepository, athenzDomain,
                                 applicationRoles, quota, tenantSecretStores, force,
                                 waitForResourcesInPrepare);
    }
}
/**
 * Builder for {@code PrepareParams}. Every field is optional; unset fields keep the defaults
 * declared below. String-typed overloads accept the serialized (HTTP) form, converting to the
 * typed representation, and treat null as "not given".
 */
class Builder {

    // All boolean flags default to false; callers opt in explicitly.
    private boolean ignoreValidationErrors = false;
    private boolean dryRun = false;
    private boolean verbose = false;
    private boolean isBootstrap = false;
    private boolean force = false;
    private boolean waitForResourcesInPrepare = false;
    private ApplicationId applicationId = null;
    // Default prepare timeout: 60 seconds from now.
    private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
    private Optional<Version> vespaVersion = Optional.empty();
    // null here means "never set"; the String setter maps null input to an empty list instead.
    private List<ContainerEndpoint> containerEndpoints = null;
    private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
    private Optional<DockerImage> dockerImageRepository = Optional.empty();
    private Optional<AthenzDomain> athenzDomain = Optional.empty();
    private Optional<ApplicationRoles> applicationRoles = Optional.empty();
    private Optional<Quota> quota = Optional.empty();
    private List<TenantSecretStore> tenantSecretStores = List.of();

    public Builder() { }

    public Builder applicationId(ApplicationId applicationId) {
        this.applicationId = applicationId;
        return this;
    }

    public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
        this.ignoreValidationErrors = ignoreValidationErrors;
        return this;
    }

    public Builder dryRun(boolean dryRun) {
        this.dryRun = dryRun;
        return this;
    }

    public Builder verbose(boolean verbose) {
        this.verbose = verbose;
        return this;
    }

    public Builder isBootstrap(boolean isBootstrap) {
        this.isBootstrap = isBootstrap;
        return this;
    }

    public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
        this.timeoutBudget = timeoutBudget;
        return this;
    }

    /** Accepts the version as a string; null or empty means "unspecified". */
    public Builder vespaVersion(String vespaVersion) {
        Optional<Version> version = Optional.empty();
        if (vespaVersion != null && !vespaVersion.isEmpty()) {
            version = Optional.of(Version.fromString(vespaVersion));
        }
        this.vespaVersion = version;
        return this;
    }

    public Builder vespaVersion(Version vespaVersion) {
        this.vespaVersion = Optional.ofNullable(vespaVersion);
        return this;
    }

    /** Deserializes a JSON list of container endpoints; null input yields an empty list. */
    public Builder containerEndpoints(String serialized) {
        this.containerEndpoints = (serialized == null)
                ? List.of()
                : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
        return this;
    }

    public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
        this.containerEndpoints = endpoints;
        return this;
    }

    public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
        this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
        return this;
    }

    /** Deserializes endpoint certificate metadata from JSON; null input leaves it empty. */
    public Builder endpointCertificateMetadata(String serialized) {
        this.endpointCertificateMetadata = (serialized == null)
                ? Optional.empty()
                : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
        return this;
    }

    public Builder dockerImageRepository(String dockerImageRepository) {
        this.dockerImageRepository = (dockerImageRepository == null)
                ? Optional.empty()
                : Optional.of(DockerImage.fromString(dockerImageRepository));
        return this;
    }

    public Builder dockerImageRepository(DockerImage dockerImageRepository) {
        this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
        return this;
    }

    public Builder athenzDomain(String athenzDomain) {
        this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
        return this;
    }

    public Builder athenzDomain(AthenzDomain athenzDomain) {
        this.athenzDomain = Optional.ofNullable(athenzDomain);
        return this;
    }

    public Builder applicationRoles(ApplicationRoles applicationRoles) {
        this.applicationRoles = Optional.ofNullable(applicationRoles);
        return this;
    }

    public Builder quota(Quota quota) {
        this.quota = Optional.ofNullable(quota);
        return this;
    }

    /** Deserializes a quota from JSON; null input leaves it empty. */
    public Builder quota(String serialized) {
        this.quota = (serialized == null)
                ? Optional.empty()
                : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
        return this;
    }

    /** Deserializes a JSON list of tenant secret stores; null input yields an empty list. */
    public Builder tenantSecretStores(String serialized) {
        List<TenantSecretStore> secretStores = (serialized == null)
                ? List.of()
                : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
        return tenantSecretStores(secretStores);
    }

    public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
        this.tenantSecretStores = tenantSecretStores;
        return this;
    }

    public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
        this.waitForResourcesInPrepare = waitForResourcesInPrepare;
        return this;
    }

    public Builder force(boolean force) {
        this.force = force;
        return this;
    }

    /** Creates the immutable PrepareParams from the accumulated state. */
    public PrepareParams build() {
        return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun,
                                 verbose, isBootstrap, vespaVersion, containerEndpoints,
                                 endpointCertificateMetadata, dockerImageRepository, athenzDomain,
                                 applicationRoles, quota, tenantSecretStores, force,
                                 waitForResourcesInPrepare);
    }
}
Use `params` from above instead of reading all the bytes again? The part's input stream was already consumed by the first `readAllBytes()`, so reading it a second time returns no data.
/**
 * POST: deploys (prepare + activate) an application. Accepts either a compressed application
 * package directly, or a multipart request carrying prepare parameters as JSON plus the package.
 *
 * @throws BadRequestException if the multipart payload cannot be parsed
 */
protected HttpResponse handlePOST(HttpRequest request) {
    validateDataAndHeader(request, List.of(APPLICATION_X_GZIP, APPLICATION_ZIP, MULTIPART_FORM_DATA));
    TenantName tenantName = validateTenant(request);
    PrepareParams prepareParams;
    CompressedApplicationInputStream compressedStream;
    // NOTE(review): a real multipart Content-Type usually carries a ";boundary=..." suffix,
    // which this exact match would miss - verify against actual client requests.
    boolean multipartRequest = Optional.ofNullable(request.getHeader(HttpHeaders.Names.CONTENT_TYPE))
                                       .map(val -> val.equalsIgnoreCase(MULTIPART_FORM_DATA))
                                       .orElse(false);
    if (multipartRequest) {
        try {
            MultiPartFormInputStream multiPartFormInputStream =
                    new MultiPartFormInputStream(request.getData(), request.getHeader(CONTENT_TYPE),
                                                 /* config */null, /* contextTmpDir */null);
            Map<String, Part> parts = multiPartFormInputStream.getParts().stream()
                                                              .collect(Collectors.toMap(Part::getName, p -> p));
            byte[] params = parts.get(MULTIPART_PARAMS).getInputStream().readAllBytes();
            // Fix: java.util.logging uses {0}-style placeholders; "{}" is SLF4J syntax and
            // would be logged verbatim without the parameter.
            log.log(Level.FINE, "Deploy parameters: [{0}]", new String(params, StandardCharsets.UTF_8));
            // Fix: reuse the bytes read above - the part's stream was already consumed by the
            // readAllBytes() call, so reading it again would yield an empty params document.
            prepareParams = PrepareParams.fromJson(params, tenantName, zookeeperBarrierTimeout);
            Part appPackagePart = parts.get(MULTIPART_APPLICATION_PACKAGE);
            compressedStream = createFromCompressedStream(appPackagePart.getInputStream(), appPackagePart.getContentType());
        } catch (IOException e) {
            log.log(Level.WARNING, "Unable to parse multipart in deploy", e);
            throw new BadRequestException("Request contains invalid data");
        }
    } else {
        // Legacy style: parameters arrive as request parameters, the body is the package itself.
        prepareParams = PrepareParams.fromHttpRequest(request, tenantName, zookeeperBarrierTimeout);
        compressedStream = createFromCompressedStream(request.getData(), request.getHeader(contentTypeHeader));
    }
    PrepareResult result = applicationRepository.deploy(compressedStream, prepareParams);
    return new SessionPrepareAndActivateResponse(result, request, prepareParams.getApplicationId(), zone);
}
prepareParams = PrepareParams.fromJson(parts.get(MULTIPART_PARAMS).getInputStream().readAllBytes(), tenantName, zookeeperBarrierTimeout);
/**
 * POST: deploys (prepare + activate) an application. Accepts either a compressed application
 * package directly, or a multipart request carrying prepare parameters as JSON plus the package.
 * The multipart stream-handling order below is load-bearing: each part's stream is read once.
 *
 * @throws BadRequestException if the multipart payload cannot be parsed
 */
protected HttpResponse handlePOST(HttpRequest request) {
    validateDataAndHeader(request, List.of(APPLICATION_X_GZIP, APPLICATION_ZIP, MULTIPART_FORM_DATA));
    TenantName tenantName = validateTenant(request);
    PrepareParams prepareParams;
    CompressedApplicationInputStream compressedStream;
    // NOTE(review): a real multipart Content-Type usually carries a ";boundary=..." suffix,
    // which this exact match would miss - verify against actual client requests.
    boolean multipartRequest = Optional.ofNullable(request.getHeader(HttpHeaders.Names.CONTENT_TYPE))
            .map(val -> val.equalsIgnoreCase(MULTIPART_FORM_DATA))
            .orElse(false);
    if (multipartRequest) {
        try {
            MultiPartFormInputStream multiPartFormInputStream = new MultiPartFormInputStream(request.getData(), request.getHeader(CONTENT_TYPE), /* config */null, /* contextTmpDir */null);
            Map<String, Part> parts = multiPartFormInputStream.getParts().stream()
                    .collect(Collectors.toMap(Part::getName, p -> p));
            // Read the params part exactly once; the bytes are reused for both logging and parsing
            // because the part's stream cannot be read twice.
            byte[] params = parts.get(MULTIPART_PARAMS).getInputStream().readAllBytes();
            // NOTE(review): "{}" is an SLF4J-style placeholder; java.util.logging expects "{0}",
            // so the parameter is likely not substituted here - confirm.
            log.log(Level.FINE, "Deploy parameters: [{}]", new String(params, StandardCharsets.UTF_8));
            prepareParams = PrepareParams.fromJson(params, tenantName, zookeeperBarrierTimeout);
            Part appPackagePart = parts.get(MULTIPART_APPLICATION_PACKAGE);
            compressedStream = createFromCompressedStream(appPackagePart.getInputStream(), appPackagePart.getContentType());
        } catch (IOException e) {
            log.log(Level.WARNING, "Unable to parse multipart in deploy", e);
            throw new BadRequestException("Request contains invalid data");
        }
    } else {
        // Legacy style: parameters arrive as request parameters, the body is the package itself.
        prepareParams = PrepareParams.fromHttpRequest(request, tenantName, zookeeperBarrierTimeout);
        compressedStream = createFromCompressedStream(request.getData(), request.getHeader(contentTypeHeader));
    }
    PrepareResult result = applicationRepository.deploy(compressedStream, prepareParams);
    return new SessionPrepareAndActivateResponse(result, request, prepareParams.getApplicationId(), zone);
}
class ApplicationApiHandler extends SessionHandler { public final static String APPLICATION_X_GZIP = "application/x-gzip"; public final static String APPLICATION_ZIP = "application/zip"; public final static String MULTIPART_FORM_DATA = "multipart/form-data"; public final static String MULTIPART_PARAMS = "prepareParams"; public final static String MULTIPART_APPLICATION_PACKAGE = "applicationPackage"; public final static String contentTypeHeader = "Content-Type"; private final TenantRepository tenantRepository; private final Duration zookeeperBarrierTimeout; private final Zone zone; @Inject public ApplicationApiHandler(Context ctx, ApplicationRepository applicationRepository, ConfigserverConfig configserverConfig, Zone zone) { super(ctx, applicationRepository); this.tenantRepository = applicationRepository.tenantRepository(); this.zookeeperBarrierTimeout = Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()); this.zone = zone; } @Override @Override public Duration getTimeout() { return zookeeperBarrierTimeout.plus(Duration.ofSeconds(10)); } private TenantName validateTenant(HttpRequest request) { TenantName tenantName = getTenantNameFromRequest(request); checkThatTenantExists(tenantRepository, tenantName); return tenantName; } public static TenantName getTenantNameFromRequest(HttpRequest request) { BindingMatch<?> bm = Utils.getBindingMatch(request, "http: return TenantName.from(bm.group(2)); } }
class ApplicationApiHandler extends SessionHandler { public final static String APPLICATION_X_GZIP = "application/x-gzip"; public final static String APPLICATION_ZIP = "application/zip"; public final static String MULTIPART_FORM_DATA = "multipart/form-data"; public final static String MULTIPART_PARAMS = "prepareParams"; public final static String MULTIPART_APPLICATION_PACKAGE = "applicationPackage"; public final static String contentTypeHeader = "Content-Type"; private final TenantRepository tenantRepository; private final Duration zookeeperBarrierTimeout; private final Zone zone; @Inject public ApplicationApiHandler(Context ctx, ApplicationRepository applicationRepository, ConfigserverConfig configserverConfig, Zone zone) { super(ctx, applicationRepository); this.tenantRepository = applicationRepository.tenantRepository(); this.zookeeperBarrierTimeout = Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()); this.zone = zone; } @Override @Override public Duration getTimeout() { return zookeeperBarrierTimeout.plus(Duration.ofSeconds(10)); } private TenantName validateTenant(HttpRequest request) { TenantName tenantName = getTenantNameFromRequest(request); checkThatTenantExists(tenantRepository, tenantName); return tenantName; } public static TenantName getTenantNameFromRequest(HttpRequest request) { BindingMatch<?> bm = Utils.getBindingMatch(request, "http: return TenantName.from(bm.group(2)); } }
Won't this fail if there is no field `IGNORE_VALIDATION_PARAM_NAME`? Check with `valid()` first?
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) { Slime slime = SlimeUtils.jsonToSlime(json); Inspector params = slime.get(); return new Builder().ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool()) .dryRun(params.field(DRY_RUN_PARAM_NAME).asBool()) .verbose(params.field(VERBOSE_PARAM_NAME).asBool()) .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout))) .applicationId(createApplicationId(params, tenant)) .vespaVersion(params.field(VESPA_VERSION_PARAM_NAME).asString()) .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList())) .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime)) .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null)) .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null)) .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null))) .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime)) .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null)) .force(params.field(FORCE_PARAM_NAME).asBool()) .waitForResourcesInPrepare(params.field(WAIT_FOR_RESOURCES_IN_PREPARE).asBool()) .build(); }
return new Builder().ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool())
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) { Slime slime = SlimeUtils.jsonToSlimeOrThrow(json); Inspector params = slime.get(); return new Builder() .ignoreValidationErrors(booleanValue(params, IGNORE_VALIDATION_PARAM_NAME)) .dryRun(booleanValue(params, DRY_RUN_PARAM_NAME)) .verbose(booleanValue(params, VERBOSE_PARAM_NAME)) .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout))) .applicationId(createApplicationId(params, tenant)) .vespaVersion(SlimeUtils.optionalString(params.field(VESPA_VERSION_PARAM_NAME)).orElse(null)) .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList())) .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime)) .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null)) .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null)) .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null))) .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime)) .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null)) .force(booleanValue(params, FORCE_PARAM_NAME)) .waitForResourcesInPrepare(booleanValue(params, WAIT_FOR_RESOURCES_IN_PREPARE)) .build(); }
class Builder { private boolean ignoreValidationErrors = false; private boolean dryRun = false; private boolean verbose = false; private boolean isBootstrap = false; private boolean force = false; private boolean waitForResourcesInPrepare = false; private ApplicationId applicationId = null; private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60)); private Optional<Version> vespaVersion = Optional.empty(); private List<ContainerEndpoint> containerEndpoints = null; private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty(); private Optional<DockerImage> dockerImageRepository = Optional.empty(); private Optional<AthenzDomain> athenzDomain = Optional.empty(); private Optional<ApplicationRoles> applicationRoles = Optional.empty(); private Optional<Quota> quota = Optional.empty(); private List<TenantSecretStore> tenantSecretStores = List.of(); public Builder() { } public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; } public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; } public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; } public Builder verbose(boolean verbose) { this.verbose = verbose; return this; } public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; } public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; } public Builder vespaVersion(String vespaVersion) { Optional<Version> version = Optional.empty(); if (vespaVersion != null && !vespaVersion.isEmpty()) { version = Optional.of(Version.fromString(vespaVersion)); } this.vespaVersion = version; return this; } public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; } public Builder containerEndpoints(String serialized) { 
this.containerEndpoints = (serialized == null) ? List.of() : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized)); return this; } public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; } public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; } public Builder endpointCertificateMetadata(String serialized) { this.endpointCertificateMetadata = (serialized == null) ? Optional.empty() : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder dockerImageRepository(String dockerImageRepository) { this.dockerImageRepository = (dockerImageRepository == null) ? Optional.empty() : Optional.of(DockerImage.fromString(dockerImageRepository)); return this; } public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; } public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; } public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; } public Builder applicationRoles(ApplicationRoles applicationRoles) { this.applicationRoles = Optional.ofNullable(applicationRoles); return this; } public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; } public Builder quota(String serialized) { this.quota = (serialized == null) ? Optional.empty() : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder tenantSecretStores(String serialized) { List<TenantSecretStore> secretStores = (serialized == null) ? 
List.of() : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get()); return tenantSecretStores(secretStores); } public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; } public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; } public Builder force(boolean force) { this.force = force; return this; } public PrepareParams build() { return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun, verbose, isBootstrap, vespaVersion, containerEndpoints, endpointCertificateMetadata, dockerImageRepository, athenzDomain, applicationRoles, quota, tenantSecretStores, force, waitForResourcesInPrepare); } }
class Builder { private boolean ignoreValidationErrors = false; private boolean dryRun = false; private boolean verbose = false; private boolean isBootstrap = false; private boolean force = false; private boolean waitForResourcesInPrepare = false; private ApplicationId applicationId = null; private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60)); private Optional<Version> vespaVersion = Optional.empty(); private List<ContainerEndpoint> containerEndpoints = null; private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty(); private Optional<DockerImage> dockerImageRepository = Optional.empty(); private Optional<AthenzDomain> athenzDomain = Optional.empty(); private Optional<ApplicationRoles> applicationRoles = Optional.empty(); private Optional<Quota> quota = Optional.empty(); private List<TenantSecretStore> tenantSecretStores = List.of(); public Builder() { } public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; } public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; } public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; } public Builder verbose(boolean verbose) { this.verbose = verbose; return this; } public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; } public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; } public Builder vespaVersion(String vespaVersion) { Optional<Version> version = Optional.empty(); if (vespaVersion != null && !vespaVersion.isEmpty()) { version = Optional.of(Version.fromString(vespaVersion)); } this.vespaVersion = version; return this; } public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; } public Builder containerEndpoints(String serialized) { 
this.containerEndpoints = (serialized == null) ? List.of() : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized)); return this; } public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; } public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; } public Builder endpointCertificateMetadata(String serialized) { this.endpointCertificateMetadata = (serialized == null) ? Optional.empty() : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder dockerImageRepository(String dockerImageRepository) { this.dockerImageRepository = (dockerImageRepository == null) ? Optional.empty() : Optional.of(DockerImage.fromString(dockerImageRepository)); return this; } public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; } public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; } public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; } public Builder applicationRoles(ApplicationRoles applicationRoles) { this.applicationRoles = Optional.ofNullable(applicationRoles); return this; } public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; } public Builder quota(String serialized) { this.quota = (serialized == null) ? Optional.empty() : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder tenantSecretStores(String serialized) { List<TenantSecretStore> secretStores = (serialized == null) ? 
List.of() : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get()); return tenantSecretStores(secretStores); } public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; } public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; } public Builder force(boolean force) { this.force = force; return this; } public PrepareParams build() { return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun, verbose, isBootstrap, vespaVersion, containerEndpoints, endpointCertificateMetadata, dockerImageRepository, athenzDomain, applicationRoles, quota, tenantSecretStores, force, waitForResourcesInPrepare); } }
It defaults to `false`.
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) { Slime slime = SlimeUtils.jsonToSlime(json); Inspector params = slime.get(); return new Builder().ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool()) .dryRun(params.field(DRY_RUN_PARAM_NAME).asBool()) .verbose(params.field(VERBOSE_PARAM_NAME).asBool()) .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout))) .applicationId(createApplicationId(params, tenant)) .vespaVersion(params.field(VESPA_VERSION_PARAM_NAME).asString()) .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList())) .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime)) .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null)) .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null)) .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null))) .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime)) .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null)) .force(params.field(FORCE_PARAM_NAME).asBool()) .waitForResourcesInPrepare(params.field(WAIT_FOR_RESOURCES_IN_PREPARE).asBool()) .build(); }
return new Builder().ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool())
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) { Slime slime = SlimeUtils.jsonToSlimeOrThrow(json); Inspector params = slime.get(); return new Builder() .ignoreValidationErrors(booleanValue(params, IGNORE_VALIDATION_PARAM_NAME)) .dryRun(booleanValue(params, DRY_RUN_PARAM_NAME)) .verbose(booleanValue(params, VERBOSE_PARAM_NAME)) .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout))) .applicationId(createApplicationId(params, tenant)) .vespaVersion(SlimeUtils.optionalString(params.field(VESPA_VERSION_PARAM_NAME)).orElse(null)) .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList())) .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime)) .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null)) .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null)) .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null))) .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime)) .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null)) .force(booleanValue(params, FORCE_PARAM_NAME)) .waitForResourcesInPrepare(booleanValue(params, WAIT_FOR_RESOURCES_IN_PREPARE)) .build(); }
class Builder { private boolean ignoreValidationErrors = false; private boolean dryRun = false; private boolean verbose = false; private boolean isBootstrap = false; private boolean force = false; private boolean waitForResourcesInPrepare = false; private ApplicationId applicationId = null; private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60)); private Optional<Version> vespaVersion = Optional.empty(); private List<ContainerEndpoint> containerEndpoints = null; private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty(); private Optional<DockerImage> dockerImageRepository = Optional.empty(); private Optional<AthenzDomain> athenzDomain = Optional.empty(); private Optional<ApplicationRoles> applicationRoles = Optional.empty(); private Optional<Quota> quota = Optional.empty(); private List<TenantSecretStore> tenantSecretStores = List.of(); public Builder() { } public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; } public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; } public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; } public Builder verbose(boolean verbose) { this.verbose = verbose; return this; } public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; } public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; } public Builder vespaVersion(String vespaVersion) { Optional<Version> version = Optional.empty(); if (vespaVersion != null && !vespaVersion.isEmpty()) { version = Optional.of(Version.fromString(vespaVersion)); } this.vespaVersion = version; return this; } public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; } public Builder containerEndpoints(String serialized) { 
this.containerEndpoints = (serialized == null) ? List.of() : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized)); return this; } public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; } public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; } public Builder endpointCertificateMetadata(String serialized) { this.endpointCertificateMetadata = (serialized == null) ? Optional.empty() : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder dockerImageRepository(String dockerImageRepository) { this.dockerImageRepository = (dockerImageRepository == null) ? Optional.empty() : Optional.of(DockerImage.fromString(dockerImageRepository)); return this; } public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; } public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; } public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; } public Builder applicationRoles(ApplicationRoles applicationRoles) { this.applicationRoles = Optional.ofNullable(applicationRoles); return this; } public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; } public Builder quota(String serialized) { this.quota = (serialized == null) ? Optional.empty() : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder tenantSecretStores(String serialized) { List<TenantSecretStore> secretStores = (serialized == null) ? 
List.of() : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get()); return tenantSecretStores(secretStores); } public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; } public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; } public Builder force(boolean force) { this.force = force; return this; } public PrepareParams build() { return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun, verbose, isBootstrap, vespaVersion, containerEndpoints, endpointCertificateMetadata, dockerImageRepository, athenzDomain, applicationRoles, quota, tenantSecretStores, force, waitForResourcesInPrepare); } }
class Builder { private boolean ignoreValidationErrors = false; private boolean dryRun = false; private boolean verbose = false; private boolean isBootstrap = false; private boolean force = false; private boolean waitForResourcesInPrepare = false; private ApplicationId applicationId = null; private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60)); private Optional<Version> vespaVersion = Optional.empty(); private List<ContainerEndpoint> containerEndpoints = null; private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty(); private Optional<DockerImage> dockerImageRepository = Optional.empty(); private Optional<AthenzDomain> athenzDomain = Optional.empty(); private Optional<ApplicationRoles> applicationRoles = Optional.empty(); private Optional<Quota> quota = Optional.empty(); private List<TenantSecretStore> tenantSecretStores = List.of(); public Builder() { } public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; } public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; } public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; } public Builder verbose(boolean verbose) { this.verbose = verbose; return this; } public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; } public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; } public Builder vespaVersion(String vespaVersion) { Optional<Version> version = Optional.empty(); if (vespaVersion != null && !vespaVersion.isEmpty()) { version = Optional.of(Version.fromString(vespaVersion)); } this.vespaVersion = version; return this; } public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; } public Builder containerEndpoints(String serialized) { 
this.containerEndpoints = (serialized == null) ? List.of() : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized)); return this; } public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; } public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; } public Builder endpointCertificateMetadata(String serialized) { this.endpointCertificateMetadata = (serialized == null) ? Optional.empty() : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder dockerImageRepository(String dockerImageRepository) { this.dockerImageRepository = (dockerImageRepository == null) ? Optional.empty() : Optional.of(DockerImage.fromString(dockerImageRepository)); return this; } public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; } public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; } public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; } public Builder applicationRoles(ApplicationRoles applicationRoles) { this.applicationRoles = Optional.ofNullable(applicationRoles); return this; } public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; } public Builder quota(String serialized) { this.quota = (serialized == null) ? Optional.empty() : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder tenantSecretStores(String serialized) { List<TenantSecretStore> secretStores = (serialized == null) ? 
List.of() : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get()); return tenantSecretStores(secretStores); } public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; } public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; } public Builder force(boolean force) { this.force = force; return this; } public PrepareParams build() { return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun, verbose, isBootstrap, vespaVersion, containerEndpoints, endpointCertificateMetadata, dockerImageRepository, athenzDomain, applicationRoles, quota, tenantSecretStores, force, waitForResourcesInPrepare); } }
Prefer `jsonToSlimeOrThrow`.
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) { Slime slime = SlimeUtils.jsonToSlime(json); Inspector params = slime.get(); return new Builder().ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool()) .dryRun(params.field(DRY_RUN_PARAM_NAME).asBool()) .verbose(params.field(VERBOSE_PARAM_NAME).asBool()) .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout))) .applicationId(createApplicationId(params, tenant)) .vespaVersion(params.field(VESPA_VERSION_PARAM_NAME).asString()) .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList())) .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime)) .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null)) .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null)) .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null))) .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime)) .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null)) .force(params.field(FORCE_PARAM_NAME).asBool()) .waitForResourcesInPrepare(params.field(WAIT_FOR_RESOURCES_IN_PREPARE).asBool()) .build(); }
Slime slime = SlimeUtils.jsonToSlime(json);
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) { Slime slime = SlimeUtils.jsonToSlimeOrThrow(json); Inspector params = slime.get(); return new Builder() .ignoreValidationErrors(booleanValue(params, IGNORE_VALIDATION_PARAM_NAME)) .dryRun(booleanValue(params, DRY_RUN_PARAM_NAME)) .verbose(booleanValue(params, VERBOSE_PARAM_NAME)) .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout))) .applicationId(createApplicationId(params, tenant)) .vespaVersion(SlimeUtils.optionalString(params.field(VESPA_VERSION_PARAM_NAME)).orElse(null)) .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList())) .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime)) .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null)) .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null)) .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null))) .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime)) .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null)) .force(booleanValue(params, FORCE_PARAM_NAME)) .waitForResourcesInPrepare(booleanValue(params, WAIT_FOR_RESOURCES_IN_PREPARE)) .build(); }
class Builder { private boolean ignoreValidationErrors = false; private boolean dryRun = false; private boolean verbose = false; private boolean isBootstrap = false; private boolean force = false; private boolean waitForResourcesInPrepare = false; private ApplicationId applicationId = null; private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60)); private Optional<Version> vespaVersion = Optional.empty(); private List<ContainerEndpoint> containerEndpoints = null; private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty(); private Optional<DockerImage> dockerImageRepository = Optional.empty(); private Optional<AthenzDomain> athenzDomain = Optional.empty(); private Optional<ApplicationRoles> applicationRoles = Optional.empty(); private Optional<Quota> quota = Optional.empty(); private List<TenantSecretStore> tenantSecretStores = List.of(); public Builder() { } public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; } public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; } public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; } public Builder verbose(boolean verbose) { this.verbose = verbose; return this; } public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; } public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; } public Builder vespaVersion(String vespaVersion) { Optional<Version> version = Optional.empty(); if (vespaVersion != null && !vespaVersion.isEmpty()) { version = Optional.of(Version.fromString(vespaVersion)); } this.vespaVersion = version; return this; } public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; } public Builder containerEndpoints(String serialized) { 
this.containerEndpoints = (serialized == null) ? List.of() : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized)); return this; } public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; } public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; } public Builder endpointCertificateMetadata(String serialized) { this.endpointCertificateMetadata = (serialized == null) ? Optional.empty() : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder dockerImageRepository(String dockerImageRepository) { this.dockerImageRepository = (dockerImageRepository == null) ? Optional.empty() : Optional.of(DockerImage.fromString(dockerImageRepository)); return this; } public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; } public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; } public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; } public Builder applicationRoles(ApplicationRoles applicationRoles) { this.applicationRoles = Optional.ofNullable(applicationRoles); return this; } public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; } public Builder quota(String serialized) { this.quota = (serialized == null) ? Optional.empty() : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder tenantSecretStores(String serialized) { List<TenantSecretStore> secretStores = (serialized == null) ? 
List.of() : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get()); return tenantSecretStores(secretStores); } public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; } public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; } public Builder force(boolean force) { this.force = force; return this; } public PrepareParams build() { return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun, verbose, isBootstrap, vespaVersion, containerEndpoints, endpointCertificateMetadata, dockerImageRepository, athenzDomain, applicationRoles, quota, tenantSecretStores, force, waitForResourcesInPrepare); } }
class Builder { private boolean ignoreValidationErrors = false; private boolean dryRun = false; private boolean verbose = false; private boolean isBootstrap = false; private boolean force = false; private boolean waitForResourcesInPrepare = false; private ApplicationId applicationId = null; private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60)); private Optional<Version> vespaVersion = Optional.empty(); private List<ContainerEndpoint> containerEndpoints = null; private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty(); private Optional<DockerImage> dockerImageRepository = Optional.empty(); private Optional<AthenzDomain> athenzDomain = Optional.empty(); private Optional<ApplicationRoles> applicationRoles = Optional.empty(); private Optional<Quota> quota = Optional.empty(); private List<TenantSecretStore> tenantSecretStores = List.of(); public Builder() { } public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; } public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; } public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; } public Builder verbose(boolean verbose) { this.verbose = verbose; return this; } public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; } public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; } public Builder vespaVersion(String vespaVersion) { Optional<Version> version = Optional.empty(); if (vespaVersion != null && !vespaVersion.isEmpty()) { version = Optional.of(Version.fromString(vespaVersion)); } this.vespaVersion = version; return this; } public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; } public Builder containerEndpoints(String serialized) { 
this.containerEndpoints = (serialized == null) ? List.of() : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized)); return this; } public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; } public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; } public Builder endpointCertificateMetadata(String serialized) { this.endpointCertificateMetadata = (serialized == null) ? Optional.empty() : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder dockerImageRepository(String dockerImageRepository) { this.dockerImageRepository = (dockerImageRepository == null) ? Optional.empty() : Optional.of(DockerImage.fromString(dockerImageRepository)); return this; } public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; } public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; } public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; } public Builder applicationRoles(ApplicationRoles applicationRoles) { this.applicationRoles = Optional.ofNullable(applicationRoles); return this; } public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; } public Builder quota(String serialized) { this.quota = (serialized == null) ? Optional.empty() : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder tenantSecretStores(String serialized) { List<TenantSecretStore> secretStores = (serialized == null) ? 
List.of() : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get()); return tenantSecretStores(secretStores); } public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; } public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; } public Builder force(boolean force) { this.force = force; return this; } public PrepareParams build() { return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun, verbose, isBootstrap, vespaVersion, containerEndpoints, endpointCertificateMetadata, dockerImageRepository, athenzDomain, applicationRoles, quota, tenantSecretStores, force, waitForResourcesInPrepare); } }
Sure, that was just an example, though, same issue with other fields that are not checked explicitly
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) { Slime slime = SlimeUtils.jsonToSlime(json); Inspector params = slime.get(); return new Builder().ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool()) .dryRun(params.field(DRY_RUN_PARAM_NAME).asBool()) .verbose(params.field(VERBOSE_PARAM_NAME).asBool()) .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout))) .applicationId(createApplicationId(params, tenant)) .vespaVersion(params.field(VESPA_VERSION_PARAM_NAME).asString()) .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList())) .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime)) .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null)) .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null)) .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null))) .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime)) .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null)) .force(params.field(FORCE_PARAM_NAME).asBool()) .waitForResourcesInPrepare(params.field(WAIT_FOR_RESOURCES_IN_PREPARE).asBool()) .build(); }
return new Builder().ignoreValidationErrors(params.field(IGNORE_VALIDATION_PARAM_NAME).asBool())
public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) { Slime slime = SlimeUtils.jsonToSlimeOrThrow(json); Inspector params = slime.get(); return new Builder() .ignoreValidationErrors(booleanValue(params, IGNORE_VALIDATION_PARAM_NAME)) .dryRun(booleanValue(params, DRY_RUN_PARAM_NAME)) .verbose(booleanValue(params, VERBOSE_PARAM_NAME)) .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout))) .applicationId(createApplicationId(params, tenant)) .vespaVersion(SlimeUtils.optionalString(params.field(VESPA_VERSION_PARAM_NAME)).orElse(null)) .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList())) .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime)) .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null)) .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null)) .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null))) .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime)) .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null)) .force(booleanValue(params, FORCE_PARAM_NAME)) .waitForResourcesInPrepare(booleanValue(params, WAIT_FOR_RESOURCES_IN_PREPARE)) .build(); }
class Builder { private boolean ignoreValidationErrors = false; private boolean dryRun = false; private boolean verbose = false; private boolean isBootstrap = false; private boolean force = false; private boolean waitForResourcesInPrepare = false; private ApplicationId applicationId = null; private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60)); private Optional<Version> vespaVersion = Optional.empty(); private List<ContainerEndpoint> containerEndpoints = null; private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty(); private Optional<DockerImage> dockerImageRepository = Optional.empty(); private Optional<AthenzDomain> athenzDomain = Optional.empty(); private Optional<ApplicationRoles> applicationRoles = Optional.empty(); private Optional<Quota> quota = Optional.empty(); private List<TenantSecretStore> tenantSecretStores = List.of(); public Builder() { } public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; } public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; } public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; } public Builder verbose(boolean verbose) { this.verbose = verbose; return this; } public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; } public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; } public Builder vespaVersion(String vespaVersion) { Optional<Version> version = Optional.empty(); if (vespaVersion != null && !vespaVersion.isEmpty()) { version = Optional.of(Version.fromString(vespaVersion)); } this.vespaVersion = version; return this; } public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; } public Builder containerEndpoints(String serialized) { 
this.containerEndpoints = (serialized == null) ? List.of() : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized)); return this; } public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; } public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; } public Builder endpointCertificateMetadata(String serialized) { this.endpointCertificateMetadata = (serialized == null) ? Optional.empty() : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder dockerImageRepository(String dockerImageRepository) { this.dockerImageRepository = (dockerImageRepository == null) ? Optional.empty() : Optional.of(DockerImage.fromString(dockerImageRepository)); return this; } public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; } public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; } public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; } public Builder applicationRoles(ApplicationRoles applicationRoles) { this.applicationRoles = Optional.ofNullable(applicationRoles); return this; } public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; } public Builder quota(String serialized) { this.quota = (serialized == null) ? Optional.empty() : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder tenantSecretStores(String serialized) { List<TenantSecretStore> secretStores = (serialized == null) ? 
List.of() : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get()); return tenantSecretStores(secretStores); } public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; } public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; } public Builder force(boolean force) { this.force = force; return this; } public PrepareParams build() { return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun, verbose, isBootstrap, vespaVersion, containerEndpoints, endpointCertificateMetadata, dockerImageRepository, athenzDomain, applicationRoles, quota, tenantSecretStores, force, waitForResourcesInPrepare); } }
class Builder { private boolean ignoreValidationErrors = false; private boolean dryRun = false; private boolean verbose = false; private boolean isBootstrap = false; private boolean force = false; private boolean waitForResourcesInPrepare = false; private ApplicationId applicationId = null; private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60)); private Optional<Version> vespaVersion = Optional.empty(); private List<ContainerEndpoint> containerEndpoints = null; private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty(); private Optional<DockerImage> dockerImageRepository = Optional.empty(); private Optional<AthenzDomain> athenzDomain = Optional.empty(); private Optional<ApplicationRoles> applicationRoles = Optional.empty(); private Optional<Quota> quota = Optional.empty(); private List<TenantSecretStore> tenantSecretStores = List.of(); public Builder() { } public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; } public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; } public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; } public Builder verbose(boolean verbose) { this.verbose = verbose; return this; } public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; } public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; } public Builder vespaVersion(String vespaVersion) { Optional<Version> version = Optional.empty(); if (vespaVersion != null && !vespaVersion.isEmpty()) { version = Optional.of(Version.fromString(vespaVersion)); } this.vespaVersion = version; return this; } public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; } public Builder containerEndpoints(String serialized) { 
this.containerEndpoints = (serialized == null) ? List.of() : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized)); return this; } public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; } public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; } public Builder endpointCertificateMetadata(String serialized) { this.endpointCertificateMetadata = (serialized == null) ? Optional.empty() : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder dockerImageRepository(String dockerImageRepository) { this.dockerImageRepository = (dockerImageRepository == null) ? Optional.empty() : Optional.of(DockerImage.fromString(dockerImageRepository)); return this; } public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; } public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; } public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; } public Builder applicationRoles(ApplicationRoles applicationRoles) { this.applicationRoles = Optional.ofNullable(applicationRoles); return this; } public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; } public Builder quota(String serialized) { this.quota = (serialized == null) ? Optional.empty() : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get())); return this; } public Builder tenantSecretStores(String serialized) { List<TenantSecretStore> secretStores = (serialized == null) ? 
List.of() : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get()); return tenantSecretStores(secretStores); } public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; } public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; } public Builder force(boolean force) { this.force = force; return this; } public PrepareParams build() { return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun, verbose, isBootstrap, vespaVersion, containerEndpoints, endpointCertificateMetadata, dockerImageRepository, athenzDomain, applicationRoles, quota, tenantSecretStores, force, waitForResourcesInPrepare); } }
You're sure you won't regret making this an array instead of having a top-level object? :)
public static Slime toSlime(List<X509Certificate> certificateList) { Slime slime = new Slime(); Cursor array = slime.setArray(); certificateList.stream() .map(X509CertificateUtils::toPem) .forEach(array::addString); return slime; }
Cursor array = slime.setArray();
public static Slime toSlime(List<X509Certificate> certificateList) { Slime slime = new Slime(); var root = slime.setObject(); Cursor array = root.setArray(certificateField); certificateList.stream() .map(X509CertificateUtils::toPem) .forEach(array::addString); return slime; }
class OperatorCertificateSerializer { public static List<X509Certificate> fromSlime(Inspector array) { return SlimeUtils.entriesStream(array) .map(Inspector::asString) .map(X509CertificateUtils::fromPem) .collect(Collectors.toList()); } }
class OperatorCertificateSerializer { private final static String certificateField = "certificates"; public static List<X509Certificate> fromSlime(Inspector object) { return SlimeUtils.entriesStream(object.field(certificateField)) .map(Inspector::asString) .map(X509CertificateUtils::fromPem) .collect(Collectors.toList()); } }
Good point @oyving, we have regretted doing this before.
public static Slime toSlime(List<X509Certificate> certificateList) { Slime slime = new Slime(); Cursor array = slime.setArray(); certificateList.stream() .map(X509CertificateUtils::toPem) .forEach(array::addString); return slime; }
Cursor array = slime.setArray();
public static Slime toSlime(List<X509Certificate> certificateList) { Slime slime = new Slime(); var root = slime.setObject(); Cursor array = root.setArray(certificateField); certificateList.stream() .map(X509CertificateUtils::toPem) .forEach(array::addString); return slime; }
class OperatorCertificateSerializer { public static List<X509Certificate> fromSlime(Inspector array) { return SlimeUtils.entriesStream(array) .map(Inspector::asString) .map(X509CertificateUtils::fromPem) .collect(Collectors.toList()); } }
class OperatorCertificateSerializer { private final static String certificateField = "certificates"; public static List<X509Certificate> fromSlime(Inspector object) { return SlimeUtils.entriesStream(object.field(certificateField)) .map(Inspector::asString) .map(X509CertificateUtils::fromPem) .collect(Collectors.toList()); } }
Hmm ...
static String historyBadge(ApplicationId id, JobStatus status, int length) { List<String> sections = new ArrayList<>(); List<String> texts = new ArrayList<>(); double x = 0; String text = id.toFullString(); double textWidth = widthOf(text); double dx = xPad + logoSize + xPad + textWidth + xPad; addShade(sections, x, dx); sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n"); addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth); x += dx; if (status.lastTriggered().isEmpty()) return badge(sections, texts, x); Run lastTriggered = status.lastTriggered().get(); List<Run> runs = status.runs().descendingMap().values().stream() .filter(Run::hasEnded) .skip(1) .limit(length + (lastTriggered.hasEnded() ? 0 : 1)) .collect(toList()); boolean isOk = status.lastCompleted().map(run -> run.status() == RunStatus.success).orElse(true); text = lastTriggered.id().type().jobName(); textWidth = widthOf(text); dx = xPad + textWidth + xPad; addShade(sections, x, dx); sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(lastTriggered, isOk) + "'/>\n"); addShadow(sections, x + dx); addText(texts, text, x + dx / 2, textWidth); x += dx; dx = xPad * (192.0 / (32 + runs.size())); for (Run run : runs) { addShade(sections, x, dx); sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(run, null) + "'/>\n"); addShadow(sections, x + dx); dx *= Math.pow(0.3, 1.0 / (runs.size() + 8)); x += dx; } Collections.reverse(sections); return badge(sections, texts, x); }
.limit(length + (lastTriggered.hasEnded() ? 0 : 1))
static String historyBadge(ApplicationId id, JobStatus status, int length) { List<String> sections = new ArrayList<>(); List<String> texts = new ArrayList<>(); double x = 0; String text = id.toFullString(); double textWidth = widthOf(text); double dx = xPad + logoSize + xPad + textWidth + xPad; addShade(sections, x, dx); sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n"); addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth); x += dx; if (status.lastTriggered().isEmpty()) return badge(sections, texts, x); Run lastTriggered = status.lastTriggered().get(); List<Run> runs = status.runs().descendingMap().values().stream() .filter(Run::hasEnded) .skip(1) .limit(length + (lastTriggered.hasEnded() ? 0 : 1)) .collect(toList()); boolean isOk = status.lastCompleted().map(run -> run.status() == RunStatus.success).orElse(true); text = lastTriggered.id().type().jobName(); textWidth = widthOf(text); dx = xPad + textWidth + xPad; addShade(sections, x, dx); sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(lastTriggered, isOk) + "'/>\n"); addShadow(sections, x + dx); addText(texts, text, x + dx / 2, textWidth); x += dx; dx = xPad * (192.0 / (32 + runs.size())); for (Run run : runs) { addShade(sections, x, dx); sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(run, null) + "'/>\n"); addShadow(sections, x + dx); dx *= Math.pow(0.3, 1.0 / (runs.size() + 8)); x += dx; } Collections.reverse(sections); return badge(sections, texts, x); }
class Badges { private static final String characterWidths = "[[\" \",35.156],[\"!\",39.355],[\"\\\"\",45.898],[\" private static final double[] widths = new double[128]; static { SlimeUtils.jsonToSlimeOrThrow(characterWidths).get() .traverse((ArrayTraverser) (i, pair) -> { if (i < 95) assert Arrays.equals(new byte[]{(byte) (i + 32)}, pair.entry(0).asUtf8()) : i + ": " + pair.entry(0).asString(); else assert "_median".equals(pair.entry(0).asString()); widths[i] = pair.entry(1).asDouble(); }); } /** Character pixel width of a 100px size Verdana font rendering of the given code point, for code points in the range [32, 126]. */ public static double widthOf(int codePoint) { return 32 <= codePoint && codePoint <= 126 ? widths[codePoint - 32] : widths[95]; } /** Computes an approximate pixel width of the given size Verdana font rendering of the given string, ignoring kerning. */ public static double widthOf(String text, int size) { return text.codePoints().mapToDouble(Badges::widthOf).sum() * (size - 0.5) / 100; } /** Computes an approximate pixel width of a 11px size Verdana font rendering of the given string, ignoring kerning. */ public static double widthOf(String text) { return widthOf(text, 11); } static String colorOf(Run run, Boolean wasOk) { switch (run.status()) { case running: return wasOk ? "url( case success: return success; default: return failure; } } static String nameOf(JobType type) { return type.isTest() ? type.isProduction() ? 
"test" : type.jobName() : type.jobName().replace("production-", ""); } static final double xPad = 6; static final double logoSize = 16; static final String dark = " static final String success = " static final String running = " static final String failure = " static void addText(List<String> texts, String text, double x, double width) { addText(texts, text, x, width, 11); } static void addText(List<String> texts, String text, double x, double width, int size) { texts.add(" <text font-size='" + size + "' x='" + (x + 0.5) + "' y='" + (15) + "' fill=' texts.add(" <text font-size='" + size + "' x='" + x + "' y='" + (14) + "' fill=' } static void addShade(List<String> sections, double x, double width) { sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (width + 6) + "' height='20' fill='url( } static void addShadow(List<String> sections, double x) { sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + 8 + "' height='20' fill='url( } static String overviewBadge(ApplicationId id, JobList jobs, SystemName system) { List<Run> runs = new ArrayList<>(jobs.lastTriggered().asList()); boolean anyTest = false; for (int i = 0; i < runs.size(); i++) { Run run = runs.get(i); if (run.id().type().isProduction() && run.id().type().isTest()) { anyTest = true; int j = i; while ( ! runs.get(j - 1).id().type().zone(system).equals(run.id().type().zone(system))) runs.set(j, runs.get(--j)); runs.set(j, run); } } List<String> sections = new ArrayList<>(); List<String> texts = new ArrayList<>(); double x = 0; String text = id.toFullString(); double textWidth = widthOf(text); double dx = xPad + logoSize + xPad + textWidth + xPad; double tdx = xPad + widthOf("test"); addShade(sections, 0, dx); sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n"); addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth); x += dx; for (int i = 0; i < runs.size(); i++) { Run run = runs.get(i); Run test = i + 1 < runs.size() ? 
runs.get(i + 1) : null; if (test == null || ! test.id().type().isTest() || ! test.id().type().isProduction()) test = null; boolean isTest = run.id().type().isTest() && run.id().type().isProduction(); text = nameOf(run.id().type()); textWidth = widthOf(text, isTest ? 9 : 11); dx = xPad + textWidth + (isTest ? 0 : xPad); boolean wasOk = jobs.get(run.id().job()).flatMap(JobStatus::lastStatus).map(RunStatus.success::equals).orElse(true); addText(texts, text, x + (dx - (isTest ? xPad : 0)) / 2, textWidth, isTest ? 9 : 11); if ( ! run.id().type().isTest() && anyTest) { String deploy = "deploy"; textWidth = widthOf(deploy, 9); addText(texts, deploy, x + dx + textWidth / 2, textWidth, 9); dx += textWidth + xPad; } if ( ! (isTest)) addShade(sections, x, dx + (test != null ? tdx : 0)); if (test == null) sections.add(" <rect x='" + (x - 16) + "' rx='3' width='" + (dx + 16) + "' height='20' fill='" + colorOf(run, wasOk) + "'/>\n"); else sections.add(" <polygon points='" + (x - 6) + " 0 " + (x - 6) + " 20 " + (x + dx - 7) + " 20 " + (x + dx + 1) + " 0' fill='" + colorOf(run, wasOk) + "'/>\n"); if (test == null) addShadow(sections, x + dx); x += dx; } Collections.reverse(sections); return badge(sections, texts, x); } static String badge(List<String> sections, List<String> texts, double width) { return "<svg xmlns='http: " <title>Deployment Status</title>\n" + " <linearGradient id='light' x2='0' y2='100%'>\n" + " <stop offset='0' stop-color=' " <stop offset='.1' stop-color=' " <stop offset='.9' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='left-light' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='.5' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='right-shadow' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='.5' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='shadow' x2='100%' y2='0'>\n" + " <stop 
offset='0' stop-color=' " <stop offset='.625' stop-color=' " <stop offset='.9' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='shade' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='0.05' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='run-on-failure' x1='40%' x2='80%' y2='0%'>\n" + " <stop offset='0' stop-color='" + running + "' />\n" + " <stop offset='1' stop-color='" + failure + "' />\n" + " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" + " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" + " </linearGradient>\n" + " <linearGradient id='run-on-success' x1='40%' x2='80%' y2='0%'>\n" + " <stop offset='0' stop-color='" + running + "' />\n" + " <stop offset='1' stop-color='" + success + "' />\n" + " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" + " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" + " </linearGradient>\n" + " <clipPath id='rounded'>\n" + " <rect width='" + width + "' height='20' rx='3' fill=' " </clipPath>\n" + " <g clip-path='url( String.join("", sections) + " <rect width='" + 2 + "' height='20' fill='url( " <rect x='" + (width - 2) + "' width='" + 2 + "' height='20' fill='url( " <rect width='" + width + "' height='20' fill='url( " </g>\n" + " <g fill=' " <svg x='" + (xPad + 0.5) + "' y='" + ((20 - logoSize) / 2 + 1) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" + " <polygon fill=' " <polygon fill=' " <polygon fill=' " <polygon fill=' " </svg>\n" + " <svg x='" + xPad + "' y='" + ((20 - logoSize) / 2) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" + " <linearGradient id='yellow-shaded' x1='91.17' y1='44.83' x2='136.24' y2='73.4' gradientUnits='userSpaceOnUse'>\n" + " <stop 
offset='0.01' stop-color=' " <stop offset='0.54' stop-color=' " </linearGradient>\n" + " <linearGradient id='blue-shaded' x1='60.71' y1='104.56' x2='-15.54' y2='63' gradientUnits='userSpaceOnUse'>\n" + " <stop offset='0' stop-color=' " <stop offset='0.54' stop-color=' " </linearGradient>\n" + " <polygon fill=' " <polygon fill='url( " <polygon fill=' " <polygon fill='url( " </svg>\n" + String.join("", texts) + " </g>\n" + "</svg>\n"; } }
class Badges { private static final String characterWidths = "[[\" \",35.156],[\"!\",39.355],[\"\\\"\",45.898],[\" private static final double[] widths = new double[128]; static { SlimeUtils.jsonToSlimeOrThrow(characterWidths).get() .traverse((ArrayTraverser) (i, pair) -> { if (i < 95) assert Arrays.equals(new byte[]{(byte) (i + 32)}, pair.entry(0).asUtf8()) : i + ": " + pair.entry(0).asString(); else assert "_median".equals(pair.entry(0).asString()); widths[i] = pair.entry(1).asDouble(); }); } /** Character pixel width of a 100px size Verdana font rendering of the given code point, for code points in the range [32, 126]. */ public static double widthOf(int codePoint) { return 32 <= codePoint && codePoint <= 126 ? widths[codePoint - 32] : widths[95]; } /** Computes an approximate pixel width of the given size Verdana font rendering of the given string, ignoring kerning. */ public static double widthOf(String text, int size) { return text.codePoints().mapToDouble(Badges::widthOf).sum() * (size - 0.5) / 100; } /** Computes an approximate pixel width of a 11px size Verdana font rendering of the given string, ignoring kerning. */ public static double widthOf(String text) { return widthOf(text, 11); } static String colorOf(Run run, Boolean wasOk) { switch (run.status()) { case running: return wasOk ? "url( case success: return success; default: return failure; } } static String nameOf(JobType type) { return type.isTest() ? type.isProduction() ? 
"test" : type.jobName() : type.jobName().replace("production-", ""); } static final double xPad = 6; static final double logoSize = 16; static final String dark = " static final String success = " static final String running = " static final String failure = " static void addText(List<String> texts, String text, double x, double width) { addText(texts, text, x, width, 11); } static void addText(List<String> texts, String text, double x, double width, int size) { texts.add(" <text font-size='" + size + "' x='" + (x + 0.5) + "' y='" + (15) + "' fill=' texts.add(" <text font-size='" + size + "' x='" + x + "' y='" + (14) + "' fill=' } static void addShade(List<String> sections, double x, double width) { sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (width + 6) + "' height='20' fill='url( } static void addShadow(List<String> sections, double x) { sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + 8 + "' height='20' fill='url( } static String overviewBadge(ApplicationId id, JobList jobs, SystemName system) { List<Run> runs = new ArrayList<>(jobs.lastTriggered().asList()); boolean anyTest = false; for (int i = 0; i < runs.size(); i++) { Run run = runs.get(i); if (run.id().type().isProduction() && run.id().type().isTest()) { anyTest = true; int j = i; while ( ! runs.get(j - 1).id().type().zone(system).equals(run.id().type().zone(system))) runs.set(j, runs.get(--j)); runs.set(j, run); } } List<String> sections = new ArrayList<>(); List<String> texts = new ArrayList<>(); double x = 0; String text = id.toFullString(); double textWidth = widthOf(text); double dx = xPad + logoSize + xPad + textWidth + xPad; double tdx = xPad + widthOf("test"); addShade(sections, 0, dx); sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n"); addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth); x += dx; for (int i = 0; i < runs.size(); i++) { Run run = runs.get(i); Run test = i + 1 < runs.size() ? 
runs.get(i + 1) : null; if (test == null || ! test.id().type().isTest() || ! test.id().type().isProduction()) test = null; boolean isTest = run.id().type().isTest() && run.id().type().isProduction(); text = nameOf(run.id().type()); textWidth = widthOf(text, isTest ? 9 : 11); dx = xPad + textWidth + (isTest ? 0 : xPad); boolean wasOk = jobs.get(run.id().job()).flatMap(JobStatus::lastStatus).map(RunStatus.success::equals).orElse(true); addText(texts, text, x + (dx - (isTest ? xPad : 0)) / 2, textWidth, isTest ? 9 : 11); if ( ! run.id().type().isTest() && anyTest) { String deploy = "deploy"; textWidth = widthOf(deploy, 9); addText(texts, deploy, x + dx + textWidth / 2, textWidth, 9); dx += textWidth + xPad; } if ( ! (isTest)) addShade(sections, x, dx + (test != null ? tdx : 0)); if (test == null) sections.add(" <rect x='" + (x - 16) + "' rx='3' width='" + (dx + 16) + "' height='20' fill='" + colorOf(run, wasOk) + "'/>\n"); else sections.add(" <polygon points='" + (x - 6) + " 0 " + (x - 6) + " 20 " + (x + dx - 7) + " 20 " + (x + dx + 1) + " 0' fill='" + colorOf(run, wasOk) + "'/>\n"); if (test == null) addShadow(sections, x + dx); x += dx; } Collections.reverse(sections); return badge(sections, texts, x); } static String badge(List<String> sections, List<String> texts, double width) { return "<svg xmlns='http: " <title>Deployment Status</title>\n" + " <linearGradient id='light' x2='0' y2='100%'>\n" + " <stop offset='0' stop-color=' " <stop offset='.1' stop-color=' " <stop offset='.9' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='left-light' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='.5' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='right-shadow' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='.5' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='shadow' x2='100%' y2='0'>\n" + " <stop 
offset='0' stop-color=' " <stop offset='.625' stop-color=' " <stop offset='.9' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='shade' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='0.05' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='run-on-failure' x1='40%' x2='80%' y2='0%'>\n" + " <stop offset='0' stop-color='" + running + "' />\n" + " <stop offset='1' stop-color='" + failure + "' />\n" + " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" + " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" + " </linearGradient>\n" + " <linearGradient id='run-on-success' x1='40%' x2='80%' y2='0%'>\n" + " <stop offset='0' stop-color='" + running + "' />\n" + " <stop offset='1' stop-color='" + success + "' />\n" + " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" + " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" + " </linearGradient>\n" + " <clipPath id='rounded'>\n" + " <rect width='" + width + "' height='20' rx='3' fill=' " </clipPath>\n" + " <g clip-path='url( String.join("", sections) + " <rect width='" + 2 + "' height='20' fill='url( " <rect x='" + (width - 2) + "' width='" + 2 + "' height='20' fill='url( " <rect width='" + width + "' height='20' fill='url( " </g>\n" + " <g fill=' " <svg x='" + (xPad + 0.5) + "' y='" + ((20 - logoSize) / 2 + 1) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" + " <polygon fill=' " <polygon fill=' " <polygon fill=' " <polygon fill=' " </svg>\n" + " <svg x='" + xPad + "' y='" + ((20 - logoSize) / 2) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" + " <linearGradient id='yellow-shaded' x1='91.17' y1='44.83' x2='136.24' y2='73.4' gradientUnits='userSpaceOnUse'>\n" + " <stop 
offset='0.01' stop-color=' " <stop offset='0.54' stop-color=' " </linearGradient>\n" + " <linearGradient id='blue-shaded' x1='60.71' y1='104.56' x2='-15.54' y2='63' gradientUnits='userSpaceOnUse'>\n" + " <stop offset='0' stop-color=' " <stop offset='0.54' stop-color=' " </linearGradient>\n" + " <polygon fill=' " <polygon fill='url( " <polygon fill=' " <polygon fill='url( " </svg>\n" + String.join("", texts) + " </g>\n" + "</svg>\n"; } }
Yes.
static String historyBadge(ApplicationId id, JobStatus status, int length) { List<String> sections = new ArrayList<>(); List<String> texts = new ArrayList<>(); double x = 0; String text = id.toFullString(); double textWidth = widthOf(text); double dx = xPad + logoSize + xPad + textWidth + xPad; addShade(sections, x, dx); sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n"); addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth); x += dx; if (status.lastTriggered().isEmpty()) return badge(sections, texts, x); Run lastTriggered = status.lastTriggered().get(); List<Run> runs = status.runs().descendingMap().values().stream() .filter(Run::hasEnded) .skip(1) .limit(length + (lastTriggered.hasEnded() ? 0 : 1)) .collect(toList()); boolean isOk = status.lastCompleted().map(run -> run.status() == RunStatus.success).orElse(true); text = lastTriggered.id().type().jobName(); textWidth = widthOf(text); dx = xPad + textWidth + xPad; addShade(sections, x, dx); sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(lastTriggered, isOk) + "'/>\n"); addShadow(sections, x + dx); addText(texts, text, x + dx / 2, textWidth); x += dx; dx = xPad * (192.0 / (32 + runs.size())); for (Run run : runs) { addShade(sections, x, dx); sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(run, null) + "'/>\n"); addShadow(sections, x + dx); dx *= Math.pow(0.3, 1.0 / (runs.size() + 8)); x += dx; } Collections.reverse(sections); return badge(sections, texts, x); }
.limit(length + (lastTriggered.hasEnded() ? 0 : 1))
static String historyBadge(ApplicationId id, JobStatus status, int length) { List<String> sections = new ArrayList<>(); List<String> texts = new ArrayList<>(); double x = 0; String text = id.toFullString(); double textWidth = widthOf(text); double dx = xPad + logoSize + xPad + textWidth + xPad; addShade(sections, x, dx); sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n"); addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth); x += dx; if (status.lastTriggered().isEmpty()) return badge(sections, texts, x); Run lastTriggered = status.lastTriggered().get(); List<Run> runs = status.runs().descendingMap().values().stream() .filter(Run::hasEnded) .skip(1) .limit(length + (lastTriggered.hasEnded() ? 0 : 1)) .collect(toList()); boolean isOk = status.lastCompleted().map(run -> run.status() == RunStatus.success).orElse(true); text = lastTriggered.id().type().jobName(); textWidth = widthOf(text); dx = xPad + textWidth + xPad; addShade(sections, x, dx); sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(lastTriggered, isOk) + "'/>\n"); addShadow(sections, x + dx); addText(texts, text, x + dx / 2, textWidth); x += dx; dx = xPad * (192.0 / (32 + runs.size())); for (Run run : runs) { addShade(sections, x, dx); sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(run, null) + "'/>\n"); addShadow(sections, x + dx); dx *= Math.pow(0.3, 1.0 / (runs.size() + 8)); x += dx; } Collections.reverse(sections); return badge(sections, texts, x); }
class Badges { private static final String characterWidths = "[[\" \",35.156],[\"!\",39.355],[\"\\\"\",45.898],[\" private static final double[] widths = new double[128]; static { SlimeUtils.jsonToSlimeOrThrow(characterWidths).get() .traverse((ArrayTraverser) (i, pair) -> { if (i < 95) assert Arrays.equals(new byte[]{(byte) (i + 32)}, pair.entry(0).asUtf8()) : i + ": " + pair.entry(0).asString(); else assert "_median".equals(pair.entry(0).asString()); widths[i] = pair.entry(1).asDouble(); }); } /** Character pixel width of a 100px size Verdana font rendering of the given code point, for code points in the range [32, 126]. */ public static double widthOf(int codePoint) { return 32 <= codePoint && codePoint <= 126 ? widths[codePoint - 32] : widths[95]; } /** Computes an approximate pixel width of the given size Verdana font rendering of the given string, ignoring kerning. */ public static double widthOf(String text, int size) { return text.codePoints().mapToDouble(Badges::widthOf).sum() * (size - 0.5) / 100; } /** Computes an approximate pixel width of a 11px size Verdana font rendering of the given string, ignoring kerning. */ public static double widthOf(String text) { return widthOf(text, 11); } static String colorOf(Run run, Boolean wasOk) { switch (run.status()) { case running: return wasOk ? "url( case success: return success; default: return failure; } } static String nameOf(JobType type) { return type.isTest() ? type.isProduction() ? 
"test" : type.jobName() : type.jobName().replace("production-", ""); } static final double xPad = 6; static final double logoSize = 16; static final String dark = " static final String success = " static final String running = " static final String failure = " static void addText(List<String> texts, String text, double x, double width) { addText(texts, text, x, width, 11); } static void addText(List<String> texts, String text, double x, double width, int size) { texts.add(" <text font-size='" + size + "' x='" + (x + 0.5) + "' y='" + (15) + "' fill=' texts.add(" <text font-size='" + size + "' x='" + x + "' y='" + (14) + "' fill=' } static void addShade(List<String> sections, double x, double width) { sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (width + 6) + "' height='20' fill='url( } static void addShadow(List<String> sections, double x) { sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + 8 + "' height='20' fill='url( } static String overviewBadge(ApplicationId id, JobList jobs, SystemName system) { List<Run> runs = new ArrayList<>(jobs.lastTriggered().asList()); boolean anyTest = false; for (int i = 0; i < runs.size(); i++) { Run run = runs.get(i); if (run.id().type().isProduction() && run.id().type().isTest()) { anyTest = true; int j = i; while ( ! runs.get(j - 1).id().type().zone(system).equals(run.id().type().zone(system))) runs.set(j, runs.get(--j)); runs.set(j, run); } } List<String> sections = new ArrayList<>(); List<String> texts = new ArrayList<>(); double x = 0; String text = id.toFullString(); double textWidth = widthOf(text); double dx = xPad + logoSize + xPad + textWidth + xPad; double tdx = xPad + widthOf("test"); addShade(sections, 0, dx); sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n"); addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth); x += dx; for (int i = 0; i < runs.size(); i++) { Run run = runs.get(i); Run test = i + 1 < runs.size() ? 
runs.get(i + 1) : null; if (test == null || ! test.id().type().isTest() || ! test.id().type().isProduction()) test = null; boolean isTest = run.id().type().isTest() && run.id().type().isProduction(); text = nameOf(run.id().type()); textWidth = widthOf(text, isTest ? 9 : 11); dx = xPad + textWidth + (isTest ? 0 : xPad); boolean wasOk = jobs.get(run.id().job()).flatMap(JobStatus::lastStatus).map(RunStatus.success::equals).orElse(true); addText(texts, text, x + (dx - (isTest ? xPad : 0)) / 2, textWidth, isTest ? 9 : 11); if ( ! run.id().type().isTest() && anyTest) { String deploy = "deploy"; textWidth = widthOf(deploy, 9); addText(texts, deploy, x + dx + textWidth / 2, textWidth, 9); dx += textWidth + xPad; } if ( ! (isTest)) addShade(sections, x, dx + (test != null ? tdx : 0)); if (test == null) sections.add(" <rect x='" + (x - 16) + "' rx='3' width='" + (dx + 16) + "' height='20' fill='" + colorOf(run, wasOk) + "'/>\n"); else sections.add(" <polygon points='" + (x - 6) + " 0 " + (x - 6) + " 20 " + (x + dx - 7) + " 20 " + (x + dx + 1) + " 0' fill='" + colorOf(run, wasOk) + "'/>\n"); if (test == null) addShadow(sections, x + dx); x += dx; } Collections.reverse(sections); return badge(sections, texts, x); } static String badge(List<String> sections, List<String> texts, double width) { return "<svg xmlns='http: " <title>Deployment Status</title>\n" + " <linearGradient id='light' x2='0' y2='100%'>\n" + " <stop offset='0' stop-color=' " <stop offset='.1' stop-color=' " <stop offset='.9' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='left-light' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='.5' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='right-shadow' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='.5' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='shadow' x2='100%' y2='0'>\n" + " <stop 
offset='0' stop-color=' " <stop offset='.625' stop-color=' " <stop offset='.9' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='shade' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='0.05' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='run-on-failure' x1='40%' x2='80%' y2='0%'>\n" + " <stop offset='0' stop-color='" + running + "' />\n" + " <stop offset='1' stop-color='" + failure + "' />\n" + " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" + " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" + " </linearGradient>\n" + " <linearGradient id='run-on-success' x1='40%' x2='80%' y2='0%'>\n" + " <stop offset='0' stop-color='" + running + "' />\n" + " <stop offset='1' stop-color='" + success + "' />\n" + " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" + " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" + " </linearGradient>\n" + " <clipPath id='rounded'>\n" + " <rect width='" + width + "' height='20' rx='3' fill=' " </clipPath>\n" + " <g clip-path='url( String.join("", sections) + " <rect width='" + 2 + "' height='20' fill='url( " <rect x='" + (width - 2) + "' width='" + 2 + "' height='20' fill='url( " <rect width='" + width + "' height='20' fill='url( " </g>\n" + " <g fill=' " <svg x='" + (xPad + 0.5) + "' y='" + ((20 - logoSize) / 2 + 1) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" + " <polygon fill=' " <polygon fill=' " <polygon fill=' " <polygon fill=' " </svg>\n" + " <svg x='" + xPad + "' y='" + ((20 - logoSize) / 2) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" + " <linearGradient id='yellow-shaded' x1='91.17' y1='44.83' x2='136.24' y2='73.4' gradientUnits='userSpaceOnUse'>\n" + " <stop 
offset='0.01' stop-color=' " <stop offset='0.54' stop-color=' " </linearGradient>\n" + " <linearGradient id='blue-shaded' x1='60.71' y1='104.56' x2='-15.54' y2='63' gradientUnits='userSpaceOnUse'>\n" + " <stop offset='0' stop-color=' " <stop offset='0.54' stop-color=' " </linearGradient>\n" + " <polygon fill=' " <polygon fill='url( " <polygon fill=' " <polygon fill='url( " </svg>\n" + String.join("", texts) + " </g>\n" + "</svg>\n"; } }
class Badges { private static final String characterWidths = "[[\" \",35.156],[\"!\",39.355],[\"\\\"\",45.898],[\" private static final double[] widths = new double[128]; static { SlimeUtils.jsonToSlimeOrThrow(characterWidths).get() .traverse((ArrayTraverser) (i, pair) -> { if (i < 95) assert Arrays.equals(new byte[]{(byte) (i + 32)}, pair.entry(0).asUtf8()) : i + ": " + pair.entry(0).asString(); else assert "_median".equals(pair.entry(0).asString()); widths[i] = pair.entry(1).asDouble(); }); } /** Character pixel width of a 100px size Verdana font rendering of the given code point, for code points in the range [32, 126]. */ public static double widthOf(int codePoint) { return 32 <= codePoint && codePoint <= 126 ? widths[codePoint - 32] : widths[95]; } /** Computes an approximate pixel width of the given size Verdana font rendering of the given string, ignoring kerning. */ public static double widthOf(String text, int size) { return text.codePoints().mapToDouble(Badges::widthOf).sum() * (size - 0.5) / 100; } /** Computes an approximate pixel width of a 11px size Verdana font rendering of the given string, ignoring kerning. */ public static double widthOf(String text) { return widthOf(text, 11); } static String colorOf(Run run, Boolean wasOk) { switch (run.status()) { case running: return wasOk ? "url( case success: return success; default: return failure; } } static String nameOf(JobType type) { return type.isTest() ? type.isProduction() ? 
"test" : type.jobName() : type.jobName().replace("production-", ""); } static final double xPad = 6; static final double logoSize = 16; static final String dark = " static final String success = " static final String running = " static final String failure = " static void addText(List<String> texts, String text, double x, double width) { addText(texts, text, x, width, 11); } static void addText(List<String> texts, String text, double x, double width, int size) { texts.add(" <text font-size='" + size + "' x='" + (x + 0.5) + "' y='" + (15) + "' fill=' texts.add(" <text font-size='" + size + "' x='" + x + "' y='" + (14) + "' fill=' } static void addShade(List<String> sections, double x, double width) { sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (width + 6) + "' height='20' fill='url( } static void addShadow(List<String> sections, double x) { sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + 8 + "' height='20' fill='url( } static String overviewBadge(ApplicationId id, JobList jobs, SystemName system) { List<Run> runs = new ArrayList<>(jobs.lastTriggered().asList()); boolean anyTest = false; for (int i = 0; i < runs.size(); i++) { Run run = runs.get(i); if (run.id().type().isProduction() && run.id().type().isTest()) { anyTest = true; int j = i; while ( ! runs.get(j - 1).id().type().zone(system).equals(run.id().type().zone(system))) runs.set(j, runs.get(--j)); runs.set(j, run); } } List<String> sections = new ArrayList<>(); List<String> texts = new ArrayList<>(); double x = 0; String text = id.toFullString(); double textWidth = widthOf(text); double dx = xPad + logoSize + xPad + textWidth + xPad; double tdx = xPad + widthOf("test"); addShade(sections, 0, dx); sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n"); addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth); x += dx; for (int i = 0; i < runs.size(); i++) { Run run = runs.get(i); Run test = i + 1 < runs.size() ? 
runs.get(i + 1) : null; if (test == null || ! test.id().type().isTest() || ! test.id().type().isProduction()) test = null; boolean isTest = run.id().type().isTest() && run.id().type().isProduction(); text = nameOf(run.id().type()); textWidth = widthOf(text, isTest ? 9 : 11); dx = xPad + textWidth + (isTest ? 0 : xPad); boolean wasOk = jobs.get(run.id().job()).flatMap(JobStatus::lastStatus).map(RunStatus.success::equals).orElse(true); addText(texts, text, x + (dx - (isTest ? xPad : 0)) / 2, textWidth, isTest ? 9 : 11); if ( ! run.id().type().isTest() && anyTest) { String deploy = "deploy"; textWidth = widthOf(deploy, 9); addText(texts, deploy, x + dx + textWidth / 2, textWidth, 9); dx += textWidth + xPad; } if ( ! (isTest)) addShade(sections, x, dx + (test != null ? tdx : 0)); if (test == null) sections.add(" <rect x='" + (x - 16) + "' rx='3' width='" + (dx + 16) + "' height='20' fill='" + colorOf(run, wasOk) + "'/>\n"); else sections.add(" <polygon points='" + (x - 6) + " 0 " + (x - 6) + " 20 " + (x + dx - 7) + " 20 " + (x + dx + 1) + " 0' fill='" + colorOf(run, wasOk) + "'/>\n"); if (test == null) addShadow(sections, x + dx); x += dx; } Collections.reverse(sections); return badge(sections, texts, x); } static String badge(List<String> sections, List<String> texts, double width) { return "<svg xmlns='http: " <title>Deployment Status</title>\n" + " <linearGradient id='light' x2='0' y2='100%'>\n" + " <stop offset='0' stop-color=' " <stop offset='.1' stop-color=' " <stop offset='.9' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='left-light' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='.5' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='right-shadow' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='.5' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='shadow' x2='100%' y2='0'>\n" + " <stop 
offset='0' stop-color=' " <stop offset='.625' stop-color=' " <stop offset='.9' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='shade' x2='100%' y2='0'>\n" + " <stop offset='0' stop-color=' " <stop offset='0.05' stop-color=' " <stop offset='1' stop-color=' " </linearGradient>\n" + " <linearGradient id='run-on-failure' x1='40%' x2='80%' y2='0%'>\n" + " <stop offset='0' stop-color='" + running + "' />\n" + " <stop offset='1' stop-color='" + failure + "' />\n" + " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" + " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" + " </linearGradient>\n" + " <linearGradient id='run-on-success' x1='40%' x2='80%' y2='0%'>\n" + " <stop offset='0' stop-color='" + running + "' />\n" + " <stop offset='1' stop-color='" + success + "' />\n" + " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" + " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" + " </linearGradient>\n" + " <clipPath id='rounded'>\n" + " <rect width='" + width + "' height='20' rx='3' fill=' " </clipPath>\n" + " <g clip-path='url( String.join("", sections) + " <rect width='" + 2 + "' height='20' fill='url( " <rect x='" + (width - 2) + "' width='" + 2 + "' height='20' fill='url( " <rect width='" + width + "' height='20' fill='url( " </g>\n" + " <g fill=' " <svg x='" + (xPad + 0.5) + "' y='" + ((20 - logoSize) / 2 + 1) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" + " <polygon fill=' " <polygon fill=' " <polygon fill=' " <polygon fill=' " </svg>\n" + " <svg x='" + xPad + "' y='" + ((20 - logoSize) / 2) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" + " <linearGradient id='yellow-shaded' x1='91.17' y1='44.83' x2='136.24' y2='73.4' gradientUnits='userSpaceOnUse'>\n" + " <stop 
offset='0.01' stop-color=' " <stop offset='0.54' stop-color=' " </linearGradient>\n" + " <linearGradient id='blue-shaded' x1='60.71' y1='104.56' x2='-15.54' y2='63' gradientUnits='userSpaceOnUse'>\n" + " <stop offset='0' stop-color=' " <stop offset='0.54' stop-color=' " </linearGradient>\n" + " <polygon fill=' " <polygon fill='url( " <polygon fill=' " <polygon fill='url( " </svg>\n" + String.join("", texts) + " </g>\n" + "</svg>\n"; } }
Should this be warning ?
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException { ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(), new DaemonThreadFactory("redeploy-apps-")); Map<ApplicationId, Future<DeploymentStatus>> deployments = new HashMap<>(); log.log(Level.INFO, () -> "Redeploying " + applicationIds.size() + " apps: " + applicationIds); applicationIds.forEach(appId -> deployments.put(appId, executor.submit(() -> { log.log(Level.INFO, () -> "Starting redeployment of " + appId); Optional<Deployment> deployment = applicationRepository.deployFromLocalActive(appId, true /* bootstrap */); if (deployment.isPresent()) { deployment.get().activate(); log.log(Level.INFO, () -> appId + " redeployed"); return DeploymentStatus.done; } else { log.log(Level.INFO, () -> "Deployment failed for " + appId + ", unable to get a deployment, active local session is " + applicationRepository.getActiveLocalSession(appId)); return DeploymentStatus.failed; } }))); List<ApplicationId> failedDeployments = checkDeployments(deployments); executor.shutdown(); executor.awaitTermination(365, TimeUnit.DAYS); return failedDeployments; }
log.log(Level.INFO, () -> "Deployment failed for " + appId +
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException { ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(), new DaemonThreadFactory("redeploy-apps-")); Map<ApplicationId, Future<DeploymentStatus>> deployments = new HashMap<>(); log.log(Level.INFO, () -> "Redeploying " + applicationIds.size() + " apps: " + applicationIds); applicationIds.forEach(appId -> deployments.put(appId, executor.submit(() -> { log.log(Level.INFO, () -> "Starting redeployment of " + appId); Optional<Deployment> deployment = applicationRepository.deployFromLocalActive(appId, true /* bootstrap */); if (deployment.isPresent()) { deployment.get().activate(); log.log(Level.INFO, () -> appId + " redeployed"); return DeploymentStatus.done; } else { log.log(Level.WARNING, () -> "Deployment failed for " + appId + ", unable to get a deployment, active local session is " + applicationRepository.getActiveLocalSession(appId)); return DeploymentStatus.failed; } }))); List<ApplicationId> failedDeployments = checkDeployments(deployments); executor.shutdown(); executor.awaitTermination(365, TimeUnit.DAYS); return failedDeployments; }
class ConfigServerBootstrap extends AbstractComponent implements Runnable { private static final Logger log = Logger.getLogger(ConfigServerBootstrap.class.getName()); enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, FOR_TESTING_NO_BOOTSTRAP_OF_APPS} enum RedeployingApplicationsFails { EXIT_JVM, CONTINUE } enum VipStatusMode { VIP_STATUS_FILE, VIP_STATUS_PROGRAMMATICALLY } private final ApplicationRepository applicationRepository; private final RpcServer server; private final VersionState versionState; private final StateMonitor stateMonitor; private final VipStatus vipStatus; private final ConfigserverConfig configserverConfig; private final Duration maxDurationOfRedeployment; private final Duration sleepTimeWhenRedeployingFails; private final RedeployingApplicationsFails exitIfRedeployingApplicationsFails; private final ExecutorService rpcServerExecutor; @Inject public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus) { this(applicationRepository, server, versionState, stateMonitor, vipStatus, BOOTSTRAP_IN_CONSTRUCTOR, EXIT_JVM, applicationRepository.configserverConfig().hostedVespa() ? 
VipStatusMode.VIP_STATUS_FILE : VipStatusMode.VIP_STATUS_PROGRAMMATICALLY); } ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus, VipStatusMode vipStatusMode) { this(applicationRepository, server, versionState, stateMonitor, vipStatus, FOR_TESTING_NO_BOOTSTRAP_OF_APPS, CONTINUE, vipStatusMode); } private ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus, Mode mode, RedeployingApplicationsFails exitIfRedeployingApplicationsFails, VipStatusMode vipStatusMode) { this.applicationRepository = applicationRepository; this.server = server; this.versionState = versionState; this.stateMonitor = stateMonitor; this.vipStatus = vipStatus; this.configserverConfig = applicationRepository.configserverConfig(); this.maxDurationOfRedeployment = Duration.ofSeconds(configserverConfig.maxDurationOfBootstrap()); this.sleepTimeWhenRedeployingFails = Duration.ofSeconds(configserverConfig.sleepTimeWhenRedeployingFails()); this.exitIfRedeployingApplicationsFails = exitIfRedeployingApplicationsFails; rpcServerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config server RPC server")); log.log(Level.FINE, () -> "Bootstrap mode: " + mode + ", VIP status mode: " + vipStatusMode); initializing(vipStatusMode); switch (mode) { case BOOTSTRAP_IN_CONSTRUCTOR: start(); break; case FOR_TESTING_NO_BOOTSTRAP_OF_APPS: break; default: throw new IllegalArgumentException("Unknown bootstrap mode " + mode + ", legal values: " + Arrays.toString(Mode.values())); } } @Override public void deconstruct() { log.log(Level.INFO, "Stopping config server"); down(); server.stop(); log.log(Level.FINE, "RPC server stopped"); rpcServerExecutor.shutdown(); } @Override public void run() { start(); do { try { Thread.sleep(1000); } catch (InterruptedException e) { log.log(Level.SEVERE, "Got 
interrupted", e); break; } } while (server.isRunning()); down(); } public void start() { if (versionState.isUpgraded()) { log.log(Level.INFO, "Config server upgrading from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications"); try { if ( ! redeployAllApplications()) { redeployingApplicationsFailed(); return; } versionState.saveNewVersion(); log.log(Level.INFO, "All applications redeployed successfully"); } catch (Exception e) { log.log(Level.SEVERE, "Redeployment of applications failed", e); redeployingApplicationsFailed(); return; } } applicationRepository.bootstrappingDone(); startRpcServer(); up(); } StateMonitor.Status status() { return stateMonitor.status(); } private void up() { vipStatus.setInRotation(true); } private void down() { vipStatus.setInRotation(false); } private void initializing(VipStatusMode vipStatusMode) { stateMonitor.status(StateMonitor.Status.initializing); if (vipStatusMode == VipStatusMode.VIP_STATUS_PROGRAMMATICALLY) vipStatus.setInRotation(false); } private void startRpcServer() { rpcServerExecutor.execute(server); Instant end = Instant.now().plus(Duration.ofSeconds(10)); while (!server.isRunning() && Instant.now().isBefore(end)) { try { Thread.sleep(10); } catch (InterruptedException e) { log.log(Level.SEVERE, "Got interrupted", e); break; } } if (!server.isRunning()) throw new RuntimeException("RPC server not started in 10 seconds"); } private void redeployingApplicationsFailed() { if (exitIfRedeployingApplicationsFails == EXIT_JVM) System.exit(1); } private boolean redeployAllApplications() throws InterruptedException { Instant end = Instant.now().plus(maxDurationOfRedeployment); List<ApplicationId> applicationsToRedeploy = applicationRepository.listApplications(); Collections.shuffle(applicationsToRedeploy); long failCount = 0; do { applicationsToRedeploy = redeployApplications(applicationsToRedeploy); if ( ! applicationsToRedeploy.isEmpty() && ! 
sleepTimeWhenRedeployingFails.isZero()) { Duration sleepTime = sleepTimeWhenRedeployingFails.multipliedBy(++failCount); if (sleepTime.compareTo(Duration.ofMinutes(10)) > 0) sleepTime = Duration.ofMinutes(10); log.log(Level.INFO, "Redeployment of " + applicationsToRedeploy + " not finished, will retry in " + sleepTime); Thread.sleep(sleepTime.toMillis()); } } while ( ! applicationsToRedeploy.isEmpty() && Instant.now().isBefore(end)); if ( ! applicationsToRedeploy.isEmpty()) { log.log(Level.SEVERE, "Redeploying applications not finished after " + maxDurationOfRedeployment + ", exiting, applications that failed redeployment: " + applicationsToRedeploy); return false; } return true; } private enum DeploymentStatus { inProgress, done, failed}; private List<ApplicationId> checkDeployments(Map<ApplicationId, Future<DeploymentStatus>> deployments) { int applicationCount = deployments.size(); Set<ApplicationId> failedDeployments = new LinkedHashSet<>(); Set<ApplicationId> finishedDeployments = new LinkedHashSet<>(); Instant lastLogged = Instant.EPOCH; do { deployments.forEach((applicationId, future) -> { if (finishedDeployments.contains(applicationId) || failedDeployments.contains(applicationId)) return; DeploymentStatus status = getDeploymentStatus(applicationId, future); switch (status) { case done: finishedDeployments.add(applicationId); break; case inProgress: break; case failed: failedDeployments.add(applicationId); break; default: throw new IllegalArgumentException("Unknown deployment status " + status); } }); if ( ! 
Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) { log.log(Level.INFO, () -> finishedDeployments.size() + " of " + applicationCount + " apps redeployed " + "(" + failedDeployments.size() + " failed)"); lastLogged = Instant.now(); } } while (failedDeployments.size() + finishedDeployments.size() < applicationCount); return new ArrayList<>(failedDeployments); } private DeploymentStatus getDeploymentStatus(ApplicationId applicationId, Future<DeploymentStatus> future) { try { return future.get(1, TimeUnit.MILLISECONDS); } catch (ExecutionException | InterruptedException e) { if (e.getCause() instanceof TransientException) { log.log(Level.INFO, "Redeploying " + applicationId + " failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e)); } else { log.log(Level.WARNING, "Redeploying " + applicationId + " failed, will retry", e); } return DeploymentStatus.failed; } catch (TimeoutException e) { return DeploymentStatus.inProgress; } } }
class ConfigServerBootstrap extends AbstractComponent implements Runnable { private static final Logger log = Logger.getLogger(ConfigServerBootstrap.class.getName()); enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, FOR_TESTING_NO_BOOTSTRAP_OF_APPS} enum RedeployingApplicationsFails { EXIT_JVM, CONTINUE } enum VipStatusMode { VIP_STATUS_FILE, VIP_STATUS_PROGRAMMATICALLY } private final ApplicationRepository applicationRepository; private final RpcServer server; private final VersionState versionState; private final StateMonitor stateMonitor; private final VipStatus vipStatus; private final ConfigserverConfig configserverConfig; private final Duration maxDurationOfRedeployment; private final Duration sleepTimeWhenRedeployingFails; private final RedeployingApplicationsFails exitIfRedeployingApplicationsFails; private final ExecutorService rpcServerExecutor; @Inject public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus) { this(applicationRepository, server, versionState, stateMonitor, vipStatus, BOOTSTRAP_IN_CONSTRUCTOR, EXIT_JVM, applicationRepository.configserverConfig().hostedVespa() ? 
VipStatusMode.VIP_STATUS_FILE : VipStatusMode.VIP_STATUS_PROGRAMMATICALLY); } ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus, VipStatusMode vipStatusMode) { this(applicationRepository, server, versionState, stateMonitor, vipStatus, FOR_TESTING_NO_BOOTSTRAP_OF_APPS, CONTINUE, vipStatusMode); } private ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus, Mode mode, RedeployingApplicationsFails exitIfRedeployingApplicationsFails, VipStatusMode vipStatusMode) { this.applicationRepository = applicationRepository; this.server = server; this.versionState = versionState; this.stateMonitor = stateMonitor; this.vipStatus = vipStatus; this.configserverConfig = applicationRepository.configserverConfig(); this.maxDurationOfRedeployment = Duration.ofSeconds(configserverConfig.maxDurationOfBootstrap()); this.sleepTimeWhenRedeployingFails = Duration.ofSeconds(configserverConfig.sleepTimeWhenRedeployingFails()); this.exitIfRedeployingApplicationsFails = exitIfRedeployingApplicationsFails; rpcServerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config server RPC server")); log.log(Level.FINE, () -> "Bootstrap mode: " + mode + ", VIP status mode: " + vipStatusMode); initializing(vipStatusMode); switch (mode) { case BOOTSTRAP_IN_CONSTRUCTOR: start(); break; case FOR_TESTING_NO_BOOTSTRAP_OF_APPS: break; default: throw new IllegalArgumentException("Unknown bootstrap mode " + mode + ", legal values: " + Arrays.toString(Mode.values())); } } @Override public void deconstruct() { log.log(Level.INFO, "Stopping config server"); down(); server.stop(); log.log(Level.FINE, "RPC server stopped"); rpcServerExecutor.shutdown(); } @Override public void run() { start(); do { try { Thread.sleep(1000); } catch (InterruptedException e) { log.log(Level.SEVERE, "Got 
interrupted", e); break; } } while (server.isRunning()); down(); } public void start() { if (versionState.isUpgraded()) { log.log(Level.INFO, "Config server upgrading from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications"); try { if ( ! redeployAllApplications()) { redeployingApplicationsFailed(); return; } versionState.saveNewVersion(); log.log(Level.INFO, "All applications redeployed successfully"); } catch (Exception e) { log.log(Level.SEVERE, "Redeployment of applications failed", e); redeployingApplicationsFailed(); return; } } applicationRepository.bootstrappingDone(); startRpcServer(); up(); } StateMonitor.Status status() { return stateMonitor.status(); } private void up() { vipStatus.setInRotation(true); } private void down() { vipStatus.setInRotation(false); } private void initializing(VipStatusMode vipStatusMode) { stateMonitor.status(StateMonitor.Status.initializing); if (vipStatusMode == VipStatusMode.VIP_STATUS_PROGRAMMATICALLY) vipStatus.setInRotation(false); } private void startRpcServer() { rpcServerExecutor.execute(server); Instant end = Instant.now().plus(Duration.ofSeconds(10)); while (!server.isRunning() && Instant.now().isBefore(end)) { try { Thread.sleep(10); } catch (InterruptedException e) { log.log(Level.SEVERE, "Got interrupted", e); break; } } if (!server.isRunning()) throw new RuntimeException("RPC server not started in 10 seconds"); } private void redeployingApplicationsFailed() { if (exitIfRedeployingApplicationsFails == EXIT_JVM) System.exit(1); } private boolean redeployAllApplications() throws InterruptedException { Instant end = Instant.now().plus(maxDurationOfRedeployment); List<ApplicationId> applicationsToRedeploy = applicationRepository.listApplications(); Collections.shuffle(applicationsToRedeploy); long failCount = 0; do { applicationsToRedeploy = redeployApplications(applicationsToRedeploy); if ( ! applicationsToRedeploy.isEmpty() && ! 
sleepTimeWhenRedeployingFails.isZero()) { Duration sleepTime = sleepTimeWhenRedeployingFails.multipliedBy(++failCount); if (sleepTime.compareTo(Duration.ofMinutes(10)) > 0) sleepTime = Duration.ofMinutes(10); log.log(Level.INFO, "Redeployment of " + applicationsToRedeploy + " not finished, will retry in " + sleepTime); Thread.sleep(sleepTime.toMillis()); } } while ( ! applicationsToRedeploy.isEmpty() && Instant.now().isBefore(end)); if ( ! applicationsToRedeploy.isEmpty()) { log.log(Level.SEVERE, "Redeploying applications not finished after " + maxDurationOfRedeployment + ", exiting, applications that failed redeployment: " + applicationsToRedeploy); return false; } return true; } private enum DeploymentStatus { inProgress, done, failed}; private List<ApplicationId> checkDeployments(Map<ApplicationId, Future<DeploymentStatus>> deployments) { int applicationCount = deployments.size(); Set<ApplicationId> failedDeployments = new LinkedHashSet<>(); Set<ApplicationId> finishedDeployments = new LinkedHashSet<>(); Instant lastLogged = Instant.EPOCH; do { deployments.forEach((applicationId, future) -> { if (finishedDeployments.contains(applicationId) || failedDeployments.contains(applicationId)) return; DeploymentStatus status = getDeploymentStatus(applicationId, future); switch (status) { case done: finishedDeployments.add(applicationId); break; case inProgress: break; case failed: failedDeployments.add(applicationId); break; default: throw new IllegalArgumentException("Unknown deployment status " + status); } }); if ( ! 
Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) { log.log(Level.INFO, () -> finishedDeployments.size() + " of " + applicationCount + " apps redeployed " + "(" + failedDeployments.size() + " failed)"); lastLogged = Instant.now(); } } while (failedDeployments.size() + finishedDeployments.size() < applicationCount); return new ArrayList<>(failedDeployments); } private DeploymentStatus getDeploymentStatus(ApplicationId applicationId, Future<DeploymentStatus> future) { try { return future.get(1, TimeUnit.MILLISECONDS); } catch (ExecutionException | InterruptedException e) { if (e.getCause() instanceof TransientException) { log.log(Level.INFO, "Redeploying " + applicationId + " failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e)); } else { log.log(Level.WARNING, "Redeploying " + applicationId + " failed, will retry", e); } return DeploymentStatus.failed; } catch (TimeoutException e) { return DeploymentStatus.inProgress; } } }
Definitely, thanks, fixed.
/**
 * Redeploys the given applications in parallel (thread count from config) and
 * returns the ids of the applications whose redeployment failed.
 *
 * @param applicationIds the applications to redeploy
 * @return the subset of applicationIds that failed redeployment
 * @throws InterruptedException if interrupted while awaiting executor termination
 */
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException {
    ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(),
                                                            new DaemonThreadFactory("redeploy-apps-"));
    Map<ApplicationId, Future<DeploymentStatus>> deployments = new HashMap<>();
    log.log(Level.INFO, () -> "Redeploying " + applicationIds.size() + " apps: " + applicationIds);
    applicationIds.forEach(appId -> deployments.put(appId, executor.submit(() -> {
        log.log(Level.INFO, () -> "Starting redeployment of " + appId);
        Optional<Deployment> deployment = applicationRepository.deployFromLocalActive(appId, true /* bootstrap */);
        if (deployment.isPresent()) {
            deployment.get().activate();
            log.log(Level.INFO, () -> appId + " redeployed");
            return DeploymentStatus.done;
        } else {
            // Failing to obtain a deployment is an error condition: log at WARNING (was INFO)
            // so it stands out when diagnosing bootstrap problems.
            log.log(Level.WARNING, () -> "Deployment failed for " + appId +
                    ", unable to get a deployment, active local session is " +
                    applicationRepository.getActiveLocalSession(appId));
            return DeploymentStatus.failed;
        }
    })));
    List<ApplicationId> failedDeployments = checkDeployments(deployments);
    executor.shutdown();
    // Effectively wait "forever": checkDeployments() has already blocked until all futures resolved.
    executor.awaitTermination(365, TimeUnit.DAYS);
    return failedDeployments;
}
log.log(Level.INFO, () -> "Deployment failed for " + appId +
/**
 * Submits one redeployment task per application on a fixed-size pool and
 * returns the ids of the applications that failed to redeploy.
 */
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(),
                                                        new DaemonThreadFactory("redeploy-apps-"));
    Map<ApplicationId, Future<DeploymentStatus>> futures = new HashMap<>();
    log.log(Level.INFO, () -> "Redeploying " + applicationIds.size() + " apps: " + applicationIds);
    for (ApplicationId id : applicationIds) {
        futures.put(id, pool.submit(() -> {
            log.log(Level.INFO, () -> "Starting redeployment of " + id);
            Optional<Deployment> deployment = applicationRepository.deployFromLocalActive(id, true /* bootstrap */);
            if ( ! deployment.isPresent()) {
                // No deployment could be created for this application: report and mark failed.
                log.log(Level.WARNING, () -> "Deployment failed for " + id +
                        ", unable to get a deployment, active local session is " +
                        applicationRepository.getActiveLocalSession(id));
                return DeploymentStatus.failed;
            }
            deployment.get().activate();
            log.log(Level.INFO, () -> id + " redeployed");
            return DeploymentStatus.done;
        }));
    }
    List<ApplicationId> failed = checkDeployments(futures);
    pool.shutdown();
    pool.awaitTermination(365, TimeUnit.DAYS);
    return failed;
}
class ConfigServerBootstrap extends AbstractComponent implements Runnable { private static final Logger log = Logger.getLogger(ConfigServerBootstrap.class.getName()); enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, FOR_TESTING_NO_BOOTSTRAP_OF_APPS} enum RedeployingApplicationsFails { EXIT_JVM, CONTINUE } enum VipStatusMode { VIP_STATUS_FILE, VIP_STATUS_PROGRAMMATICALLY } private final ApplicationRepository applicationRepository; private final RpcServer server; private final VersionState versionState; private final StateMonitor stateMonitor; private final VipStatus vipStatus; private final ConfigserverConfig configserverConfig; private final Duration maxDurationOfRedeployment; private final Duration sleepTimeWhenRedeployingFails; private final RedeployingApplicationsFails exitIfRedeployingApplicationsFails; private final ExecutorService rpcServerExecutor; @Inject public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus) { this(applicationRepository, server, versionState, stateMonitor, vipStatus, BOOTSTRAP_IN_CONSTRUCTOR, EXIT_JVM, applicationRepository.configserverConfig().hostedVespa() ? 
VipStatusMode.VIP_STATUS_FILE : VipStatusMode.VIP_STATUS_PROGRAMMATICALLY); } ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus, VipStatusMode vipStatusMode) { this(applicationRepository, server, versionState, stateMonitor, vipStatus, FOR_TESTING_NO_BOOTSTRAP_OF_APPS, CONTINUE, vipStatusMode); } private ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus, Mode mode, RedeployingApplicationsFails exitIfRedeployingApplicationsFails, VipStatusMode vipStatusMode) { this.applicationRepository = applicationRepository; this.server = server; this.versionState = versionState; this.stateMonitor = stateMonitor; this.vipStatus = vipStatus; this.configserverConfig = applicationRepository.configserverConfig(); this.maxDurationOfRedeployment = Duration.ofSeconds(configserverConfig.maxDurationOfBootstrap()); this.sleepTimeWhenRedeployingFails = Duration.ofSeconds(configserverConfig.sleepTimeWhenRedeployingFails()); this.exitIfRedeployingApplicationsFails = exitIfRedeployingApplicationsFails; rpcServerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config server RPC server")); log.log(Level.FINE, () -> "Bootstrap mode: " + mode + ", VIP status mode: " + vipStatusMode); initializing(vipStatusMode); switch (mode) { case BOOTSTRAP_IN_CONSTRUCTOR: start(); break; case FOR_TESTING_NO_BOOTSTRAP_OF_APPS: break; default: throw new IllegalArgumentException("Unknown bootstrap mode " + mode + ", legal values: " + Arrays.toString(Mode.values())); } } @Override public void deconstruct() { log.log(Level.INFO, "Stopping config server"); down(); server.stop(); log.log(Level.FINE, "RPC server stopped"); rpcServerExecutor.shutdown(); } @Override public void run() { start(); do { try { Thread.sleep(1000); } catch (InterruptedException e) { log.log(Level.SEVERE, "Got 
interrupted", e); break; } } while (server.isRunning()); down(); } public void start() { if (versionState.isUpgraded()) { log.log(Level.INFO, "Config server upgrading from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications"); try { if ( ! redeployAllApplications()) { redeployingApplicationsFailed(); return; } versionState.saveNewVersion(); log.log(Level.INFO, "All applications redeployed successfully"); } catch (Exception e) { log.log(Level.SEVERE, "Redeployment of applications failed", e); redeployingApplicationsFailed(); return; } } applicationRepository.bootstrappingDone(); startRpcServer(); up(); } StateMonitor.Status status() { return stateMonitor.status(); } private void up() { vipStatus.setInRotation(true); } private void down() { vipStatus.setInRotation(false); } private void initializing(VipStatusMode vipStatusMode) { stateMonitor.status(StateMonitor.Status.initializing); if (vipStatusMode == VipStatusMode.VIP_STATUS_PROGRAMMATICALLY) vipStatus.setInRotation(false); } private void startRpcServer() { rpcServerExecutor.execute(server); Instant end = Instant.now().plus(Duration.ofSeconds(10)); while (!server.isRunning() && Instant.now().isBefore(end)) { try { Thread.sleep(10); } catch (InterruptedException e) { log.log(Level.SEVERE, "Got interrupted", e); break; } } if (!server.isRunning()) throw new RuntimeException("RPC server not started in 10 seconds"); } private void redeployingApplicationsFailed() { if (exitIfRedeployingApplicationsFails == EXIT_JVM) System.exit(1); } private boolean redeployAllApplications() throws InterruptedException { Instant end = Instant.now().plus(maxDurationOfRedeployment); List<ApplicationId> applicationsToRedeploy = applicationRepository.listApplications(); Collections.shuffle(applicationsToRedeploy); long failCount = 0; do { applicationsToRedeploy = redeployApplications(applicationsToRedeploy); if ( ! applicationsToRedeploy.isEmpty() && ! 
sleepTimeWhenRedeployingFails.isZero()) { Duration sleepTime = sleepTimeWhenRedeployingFails.multipliedBy(++failCount); if (sleepTime.compareTo(Duration.ofMinutes(10)) > 0) sleepTime = Duration.ofMinutes(10); log.log(Level.INFO, "Redeployment of " + applicationsToRedeploy + " not finished, will retry in " + sleepTime); Thread.sleep(sleepTime.toMillis()); } } while ( ! applicationsToRedeploy.isEmpty() && Instant.now().isBefore(end)); if ( ! applicationsToRedeploy.isEmpty()) { log.log(Level.SEVERE, "Redeploying applications not finished after " + maxDurationOfRedeployment + ", exiting, applications that failed redeployment: " + applicationsToRedeploy); return false; } return true; } private enum DeploymentStatus { inProgress, done, failed}; private List<ApplicationId> checkDeployments(Map<ApplicationId, Future<DeploymentStatus>> deployments) { int applicationCount = deployments.size(); Set<ApplicationId> failedDeployments = new LinkedHashSet<>(); Set<ApplicationId> finishedDeployments = new LinkedHashSet<>(); Instant lastLogged = Instant.EPOCH; do { deployments.forEach((applicationId, future) -> { if (finishedDeployments.contains(applicationId) || failedDeployments.contains(applicationId)) return; DeploymentStatus status = getDeploymentStatus(applicationId, future); switch (status) { case done: finishedDeployments.add(applicationId); break; case inProgress: break; case failed: failedDeployments.add(applicationId); break; default: throw new IllegalArgumentException("Unknown deployment status " + status); } }); if ( ! 
Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) { log.log(Level.INFO, () -> finishedDeployments.size() + " of " + applicationCount + " apps redeployed " + "(" + failedDeployments.size() + " failed)"); lastLogged = Instant.now(); } } while (failedDeployments.size() + finishedDeployments.size() < applicationCount); return new ArrayList<>(failedDeployments); } private DeploymentStatus getDeploymentStatus(ApplicationId applicationId, Future<DeploymentStatus> future) { try { return future.get(1, TimeUnit.MILLISECONDS); } catch (ExecutionException | InterruptedException e) { if (e.getCause() instanceof TransientException) { log.log(Level.INFO, "Redeploying " + applicationId + " failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e)); } else { log.log(Level.WARNING, "Redeploying " + applicationId + " failed, will retry", e); } return DeploymentStatus.failed; } catch (TimeoutException e) { return DeploymentStatus.inProgress; } } }
class ConfigServerBootstrap extends AbstractComponent implements Runnable { private static final Logger log = Logger.getLogger(ConfigServerBootstrap.class.getName()); enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, FOR_TESTING_NO_BOOTSTRAP_OF_APPS} enum RedeployingApplicationsFails { EXIT_JVM, CONTINUE } enum VipStatusMode { VIP_STATUS_FILE, VIP_STATUS_PROGRAMMATICALLY } private final ApplicationRepository applicationRepository; private final RpcServer server; private final VersionState versionState; private final StateMonitor stateMonitor; private final VipStatus vipStatus; private final ConfigserverConfig configserverConfig; private final Duration maxDurationOfRedeployment; private final Duration sleepTimeWhenRedeployingFails; private final RedeployingApplicationsFails exitIfRedeployingApplicationsFails; private final ExecutorService rpcServerExecutor; @Inject public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus) { this(applicationRepository, server, versionState, stateMonitor, vipStatus, BOOTSTRAP_IN_CONSTRUCTOR, EXIT_JVM, applicationRepository.configserverConfig().hostedVespa() ? 
VipStatusMode.VIP_STATUS_FILE : VipStatusMode.VIP_STATUS_PROGRAMMATICALLY); } ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus, VipStatusMode vipStatusMode) { this(applicationRepository, server, versionState, stateMonitor, vipStatus, FOR_TESTING_NO_BOOTSTRAP_OF_APPS, CONTINUE, vipStatusMode); } private ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState, StateMonitor stateMonitor, VipStatus vipStatus, Mode mode, RedeployingApplicationsFails exitIfRedeployingApplicationsFails, VipStatusMode vipStatusMode) { this.applicationRepository = applicationRepository; this.server = server; this.versionState = versionState; this.stateMonitor = stateMonitor; this.vipStatus = vipStatus; this.configserverConfig = applicationRepository.configserverConfig(); this.maxDurationOfRedeployment = Duration.ofSeconds(configserverConfig.maxDurationOfBootstrap()); this.sleepTimeWhenRedeployingFails = Duration.ofSeconds(configserverConfig.sleepTimeWhenRedeployingFails()); this.exitIfRedeployingApplicationsFails = exitIfRedeployingApplicationsFails; rpcServerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config server RPC server")); log.log(Level.FINE, () -> "Bootstrap mode: " + mode + ", VIP status mode: " + vipStatusMode); initializing(vipStatusMode); switch (mode) { case BOOTSTRAP_IN_CONSTRUCTOR: start(); break; case FOR_TESTING_NO_BOOTSTRAP_OF_APPS: break; default: throw new IllegalArgumentException("Unknown bootstrap mode " + mode + ", legal values: " + Arrays.toString(Mode.values())); } } @Override public void deconstruct() { log.log(Level.INFO, "Stopping config server"); down(); server.stop(); log.log(Level.FINE, "RPC server stopped"); rpcServerExecutor.shutdown(); } @Override public void run() { start(); do { try { Thread.sleep(1000); } catch (InterruptedException e) { log.log(Level.SEVERE, "Got 
interrupted", e); break; } } while (server.isRunning()); down(); } public void start() { if (versionState.isUpgraded()) { log.log(Level.INFO, "Config server upgrading from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications"); try { if ( ! redeployAllApplications()) { redeployingApplicationsFailed(); return; } versionState.saveNewVersion(); log.log(Level.INFO, "All applications redeployed successfully"); } catch (Exception e) { log.log(Level.SEVERE, "Redeployment of applications failed", e); redeployingApplicationsFailed(); return; } } applicationRepository.bootstrappingDone(); startRpcServer(); up(); } StateMonitor.Status status() { return stateMonitor.status(); } private void up() { vipStatus.setInRotation(true); } private void down() { vipStatus.setInRotation(false); } private void initializing(VipStatusMode vipStatusMode) { stateMonitor.status(StateMonitor.Status.initializing); if (vipStatusMode == VipStatusMode.VIP_STATUS_PROGRAMMATICALLY) vipStatus.setInRotation(false); } private void startRpcServer() { rpcServerExecutor.execute(server); Instant end = Instant.now().plus(Duration.ofSeconds(10)); while (!server.isRunning() && Instant.now().isBefore(end)) { try { Thread.sleep(10); } catch (InterruptedException e) { log.log(Level.SEVERE, "Got interrupted", e); break; } } if (!server.isRunning()) throw new RuntimeException("RPC server not started in 10 seconds"); } private void redeployingApplicationsFailed() { if (exitIfRedeployingApplicationsFails == EXIT_JVM) System.exit(1); } private boolean redeployAllApplications() throws InterruptedException { Instant end = Instant.now().plus(maxDurationOfRedeployment); List<ApplicationId> applicationsToRedeploy = applicationRepository.listApplications(); Collections.shuffle(applicationsToRedeploy); long failCount = 0; do { applicationsToRedeploy = redeployApplications(applicationsToRedeploy); if ( ! applicationsToRedeploy.isEmpty() && ! 
sleepTimeWhenRedeployingFails.isZero()) { Duration sleepTime = sleepTimeWhenRedeployingFails.multipliedBy(++failCount); if (sleepTime.compareTo(Duration.ofMinutes(10)) > 0) sleepTime = Duration.ofMinutes(10); log.log(Level.INFO, "Redeployment of " + applicationsToRedeploy + " not finished, will retry in " + sleepTime); Thread.sleep(sleepTime.toMillis()); } } while ( ! applicationsToRedeploy.isEmpty() && Instant.now().isBefore(end)); if ( ! applicationsToRedeploy.isEmpty()) { log.log(Level.SEVERE, "Redeploying applications not finished after " + maxDurationOfRedeployment + ", exiting, applications that failed redeployment: " + applicationsToRedeploy); return false; } return true; } private enum DeploymentStatus { inProgress, done, failed}; private List<ApplicationId> checkDeployments(Map<ApplicationId, Future<DeploymentStatus>> deployments) { int applicationCount = deployments.size(); Set<ApplicationId> failedDeployments = new LinkedHashSet<>(); Set<ApplicationId> finishedDeployments = new LinkedHashSet<>(); Instant lastLogged = Instant.EPOCH; do { deployments.forEach((applicationId, future) -> { if (finishedDeployments.contains(applicationId) || failedDeployments.contains(applicationId)) return; DeploymentStatus status = getDeploymentStatus(applicationId, future); switch (status) { case done: finishedDeployments.add(applicationId); break; case inProgress: break; case failed: failedDeployments.add(applicationId); break; default: throw new IllegalArgumentException("Unknown deployment status " + status); } }); if ( ! 
Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) { log.log(Level.INFO, () -> finishedDeployments.size() + " of " + applicationCount + " apps redeployed " + "(" + failedDeployments.size() + " failed)"); lastLogged = Instant.now(); } } while (failedDeployments.size() + finishedDeployments.size() < applicationCount); return new ArrayList<>(failedDeployments); } private DeploymentStatus getDeploymentStatus(ApplicationId applicationId, Future<DeploymentStatus> future) { try { return future.get(1, TimeUnit.MILLISECONDS); } catch (ExecutionException | InterruptedException e) { if (e.getCause() instanceof TransientException) { log.log(Level.INFO, "Redeploying " + applicationId + " failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e)); } else { log.log(Level.WARNING, "Redeploying " + applicationId + " failed, will retry", e); } return DeploymentStatus.failed; } catch (TimeoutException e) { return DeploymentStatus.inProgress; } } }
```suggestion ```
public void testCost() { ClusterResources r1 = new ClusterResources(3, 1, new NodeResources(2, 8, 50, 1)); ClusterResources r2 = new ClusterResources(3, 1, new NodeResources(2, 16, 50, 1)); System.out.println(r1.cost()*24*30); System.out.println(r2.cost()*24*30); System.out.println((r1.cost()*24*30 + r2.cost()*24*30) * 1.05); assertEquals(1.818, r1.cost() + r2.cost(), 0.01); }
System.out.println((r1.cost()*24*30 + r2.cost()*24*30) * 1.05);
public void testCost() { ClusterResources r1 = new ClusterResources(3, 1, new NodeResources(2, 8, 50, 1)); ClusterResources r2 = new ClusterResources(3, 1, new NodeResources(2, 16, 50, 1)); assertEquals(1.818, r1.cost() + r2.cost(), 0.01); }
class ClusterResourcesTest { @Test }
class ClusterResourcesTest { @Test }
XD
private Optional<ConvergenceSummary> convergenceSummaryFrom(Inspector summaryArray) { if ( ! summaryArray.valid()) return Optional.empty(); if (summaryArray.entries() != 12 && summaryArray.entries() != 13) throw new IllegalArgumentException("Convergence summary must have 13 entries"); return Optional.of(new ConvergenceSummary(summaryArray.entry(0).asLong(), summaryArray.entry(1).asLong(), summaryArray.entry(2).asLong(), summaryArray.entry(3).asLong(), summaryArray.entry(4).asLong(), summaryArray.entry(5).asLong(), summaryArray.entry(6).asLong(), summaryArray.entry(7).asLong(), summaryArray.entry(8).asLong(), summaryArray.entry(9).asLong(), summaryArray.entry(10).asLong(), summaryArray.entry(11).asLong(), summaryArray.entry(12).asLong())); }
throw new IllegalArgumentException("Convergence summary must have 13 entries");
private Optional<ConvergenceSummary> convergenceSummaryFrom(Inspector summaryArray) { if ( ! summaryArray.valid()) return Optional.empty(); if (summaryArray.entries() != 12 && summaryArray.entries() != 13) throw new IllegalArgumentException("Convergence summary must have 13 entries"); return Optional.of(new ConvergenceSummary(summaryArray.entry(0).asLong(), summaryArray.entry(1).asLong(), summaryArray.entry(2).asLong(), summaryArray.entry(3).asLong(), summaryArray.entry(4).asLong(), summaryArray.entry(5).asLong(), summaryArray.entry(6).asLong(), summaryArray.entry(7).asLong(), summaryArray.entry(8).asLong(), summaryArray.entry(9).asLong(), summaryArray.entry(10).asLong(), summaryArray.entry(11).asLong(), summaryArray.entry(12).asLong())); }
class RunSerializer { private static final String stepsField = "steps"; private static final String stepDetailsField = "stepDetails"; private static final String startTimeField = "startTime"; private static final String applicationField = "id"; private static final String jobTypeField = "type"; private static final String numberField = "number"; private static final String startField = "start"; private static final String endField = "end"; private static final String statusField = "status"; private static final String versionsField = "versions"; private static final String platformVersionField = "platform"; private static final String repositoryField = "repository"; private static final String branchField = "branch"; private static final String commitField = "commit"; private static final String authorEmailField = "authorEmail"; private static final String compileVersionField = "compileVersion"; private static final String buildTimeField = "buildTime"; private static final String sourceUrlField = "sourceUrl"; private static final String buildField = "build"; private static final String sourceField = "source"; private static final String lastTestRecordField = "lastTestRecord"; private static final String lastVespaLogTimestampField = "lastVespaLogTimestamp"; private static final String noNodesDownSinceField = "noNodesDownSince"; private static final String oldConvergenceSummaryField = "convergenceSummary"; private static final String convergenceSummaryField = "convergenceSummaryV2"; private static final String testerCertificateField = "testerCertificate"; Run runFromSlime(Slime slime) { return runFromSlime(slime.get()); } NavigableMap<RunId, Run> runsFromSlime(Slime slime) { NavigableMap<RunId, Run> runs = new TreeMap<>(comparing(RunId::number)); Inspector runArray = slime.get(); runArray.traverse((ArrayTraverser) (__, runObject) -> { Run run = runFromSlime(runObject); runs.put(run.id(), run); }); return runs; } private Run runFromSlime(Inspector runObject) { var 
steps = new EnumMap<Step, StepInfo>(Step.class); Inspector detailsField = runObject.field(stepDetailsField); runObject.field(stepsField).traverse((ObjectTraverser) (step, status) -> { Step typedStep = stepOf(step); Inspector stepDetailsField = detailsField.field(step); Inspector startTimeValue = stepDetailsField.field(startTimeField); Optional<Instant> startTime = Serializers.optionalInstant(startTimeValue); steps.put(typedStep, new StepInfo(typedStep, stepStatusOf(status.asString()), startTime)); }); return new Run(new RunId(ApplicationId.fromSerializedForm(runObject.field(applicationField).asString()), JobType.fromJobName(runObject.field(jobTypeField).asString()), runObject.field(numberField).asLong()), steps, versionsFromSlime(runObject.field(versionsField)), Serializers.instant(runObject.field(startField)), Serializers.optionalInstant(runObject.field(endField)), runStatusOf(runObject.field(statusField).asString()), runObject.field(lastTestRecordField).asLong(), Instant.EPOCH.plus(runObject.field(lastVespaLogTimestampField).asLong(), ChronoUnit.MICROS), Serializers.optionalInstant(runObject.field(noNodesDownSinceField)), convergenceSummaryFrom(runObject.field(convergenceSummaryField)) .or(() ->convergenceSummaryFrom(runObject.field(oldConvergenceSummaryField))), Optional.of(runObject.field(testerCertificateField)) .filter(Inspector::valid) .map(certificate -> X509CertificateUtils.fromPem(certificate.asString()))); } private Versions versionsFromSlime(Inspector versionsObject) { Version targetPlatformVersion = Version.fromString(versionsObject.field(platformVersionField).asString()); ApplicationVersion targetApplicationVersion = applicationVersionFrom(versionsObject); Optional<Version> sourcePlatformVersion = versionsObject.field(sourceField).valid() ? 
Optional.of(Version.fromString(versionsObject.field(sourceField).field(platformVersionField).asString())) : Optional.empty(); Optional<ApplicationVersion> sourceApplicationVersion = versionsObject.field(sourceField).valid() ? Optional.of(applicationVersionFrom(versionsObject.field(sourceField))) : Optional.empty(); return new Versions(targetPlatformVersion, targetApplicationVersion, sourcePlatformVersion, sourceApplicationVersion); } private ApplicationVersion applicationVersionFrom(Inspector versionObject) { if ( ! versionObject.field(buildField).valid()) return ApplicationVersion.unknown; long buildNumber = versionObject.field(buildField).asLong(); Optional<SourceRevision> source = Optional.of(new SourceRevision(versionObject.field(repositoryField).asString(), versionObject.field(branchField).asString(), versionObject.field(commitField).asString())) .filter(revision -> ! revision.commit().isBlank() && ! revision.repository().isBlank() && ! revision.branch().isBlank()); Optional<String> authorEmail = Serializers.optionalString(versionObject.field(authorEmailField)); Optional<Version> compileVersion = Serializers.optionalString(versionObject.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = Serializers.optionalInstant(versionObject.field(buildTimeField)); Optional<String> sourceUrl = Serializers.optionalString(versionObject.field(sourceUrlField)); Optional<String> commit = Serializers.optionalString(versionObject.field(commitField)); return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, compileVersion, buildTime, sourceUrl, commit); } Slime toSlime(Iterable<Run> runs) { Slime slime = new Slime(); Cursor runArray = slime.setArray(); runs.forEach(run -> toSlime(run, runArray.addObject())); return slime; } Slime toSlime(Run run) { Slime slime = new Slime(); toSlime(run, slime.setObject()); return slime; } private void toSlime(Run run, Cursor runObject) { runObject.setString(applicationField, 
run.id().application().serializedForm()); runObject.setString(jobTypeField, run.id().type().jobName()); runObject.setLong(numberField, run.id().number()); runObject.setLong(startField, run.start().toEpochMilli()); run.end().ifPresent(end -> runObject.setLong(endField, end.toEpochMilli())); runObject.setString(statusField, valueOf(run.status())); runObject.setLong(lastTestRecordField, run.lastTestLogEntry()); runObject.setLong(lastVespaLogTimestampField, Instant.EPOCH.until(run.lastVespaLogTimestamp(), ChronoUnit.MICROS)); run.noNodesDownSince().ifPresent(noNodesDownSince -> runObject.setLong(noNodesDownSinceField, noNodesDownSince.toEpochMilli())); run.convergenceSummary().ifPresent(convergenceSummary -> { toSlime(convergenceSummary, runObject.setArray(convergenceSummaryField), false); toSlime(convergenceSummary, runObject.setArray(oldConvergenceSummaryField), true); }); run.testerCertificate().ifPresent(certificate -> runObject.setString(testerCertificateField, X509CertificateUtils.toPem(certificate))); Cursor stepsObject = runObject.setObject(stepsField); run.steps().forEach((step, statusInfo) -> stepsObject.setString(valueOf(step), valueOf(statusInfo.status()))); Cursor stepDetailsObject = runObject.setObject(stepDetailsField); run.steps().forEach((step, statusInfo) -> statusInfo.startTime().ifPresent(startTime -> stepDetailsObject.setObject(valueOf(step)).setLong(startTimeField, valueOf(startTime)))); Cursor versionsObject = runObject.setObject(versionsField); toSlime(run.versions().targetPlatform(), run.versions().targetApplication(), versionsObject); run.versions().sourcePlatform().ifPresent(sourcePlatformVersion -> { toSlime(sourcePlatformVersion, run.versions().sourceApplication() .orElseThrow(() -> new IllegalArgumentException("Source versions must be both present or absent.")), versionsObject.setObject(sourceField)); }); } private void toSlime(Version platformVersion, ApplicationVersion applicationVersion, Cursor versionsObject) { 
versionsObject.setString(platformVersionField, platformVersion.toString()); applicationVersion.buildNumber().ifPresent(number -> versionsObject.setLong(buildField, number)); applicationVersion.source().map(SourceRevision::repository).ifPresent(repository -> versionsObject.setString(repositoryField, repository)); applicationVersion.source().map(SourceRevision::branch).ifPresent(branch -> versionsObject.setString(branchField, branch)); applicationVersion.source().map(SourceRevision::commit).ifPresent(commit -> versionsObject.setString(commitField, commit)); applicationVersion.authorEmail().ifPresent(email -> versionsObject.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> versionsObject.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> versionsObject.setLong(buildTimeField, time.toEpochMilli())); applicationVersion.sourceUrl().ifPresent(url -> versionsObject.setString(sourceUrlField, url)); applicationVersion.commit().ifPresent(commit -> versionsObject.setString(commitField, commit)); } private void toSlime(ConvergenceSummary summary, Cursor summaryArray, boolean oldFormat) { summaryArray.addLong(summary.nodes()); summaryArray.addLong(summary.down()); summaryArray.addLong(summary.upgradingOs()); summaryArray.addLong(summary.upgradingFirmware()); summaryArray.addLong(summary.needPlatformUpgrade()); summaryArray.addLong(summary.upgradingPlatform()); summaryArray.addLong(summary.needReboot()); summaryArray.addLong(summary.rebooting()); summaryArray.addLong(summary.needRestart()); summaryArray.addLong(summary.restarting()); summaryArray.addLong(summary.services()); summaryArray.addLong(summary.needNewConfig()); if (!oldFormat) summaryArray.addLong(summary.retiring()); } static String valueOf(Step step) { switch (step) { case deployInitialReal : return "deployInitialReal"; case installInitialReal : return "installInitialReal"; case deployReal : return "deployReal"; case 
installReal : return "installReal"; case deactivateReal : return "deactivateReal"; case deployTester : return "deployTester"; case installTester : return "installTester"; case deactivateTester : return "deactivateTester"; case copyVespaLogs : return "copyVespaLogs"; case startStagingSetup : return "startStagingSetup"; case endStagingSetup : return "endStagingSetup"; case startTests : return "startTests"; case endTests : return "endTests"; case report : return "report"; default: throw new AssertionError("No value defined for '" + step + "'!"); } } static Step stepOf(String step) { switch (step) { case "deployInitialReal" : return deployInitialReal; case "installInitialReal" : return installInitialReal; case "deployReal" : return deployReal; case "installReal" : return installReal; case "deactivateReal" : return deactivateReal; case "deployTester" : return deployTester; case "installTester" : return installTester; case "deactivateTester" : return deactivateTester; case "copyVespaLogs" : return copyVespaLogs; case "startStagingSetup" : return startStagingSetup; case "endStagingSetup" : return endStagingSetup; case "startTests" : return startTests; case "endTests" : return endTests; case "report" : return report; default: throw new IllegalArgumentException("No step defined by '" + step + "'!"); } } static String valueOf(Status status) { switch (status) { case unfinished : return "unfinished"; case failed : return "failed"; case succeeded : return "succeeded"; default: throw new AssertionError("No value defined for '" + status + "'!"); } } static Status stepStatusOf(String status) { switch (status) { case "unfinished" : return unfinished; case "failed" : return failed; case "succeeded" : return succeeded; default: throw new IllegalArgumentException("No status defined by '" + status + "'!"); } } static Long valueOf(Instant instant) { return instant.toEpochMilli(); } static String valueOf(RunStatus status) { switch (status) { case running : return "running"; case 
outOfCapacity : return "outOfCapacity"; case endpointCertificateTimeout : return "endpointCertificateTimeout"; case deploymentFailed : return "deploymentFailed"; case installationFailed : return "installationFailed"; case testFailure : return "testFailure"; case error : return "error"; case success : return "success"; case aborted : return "aborted"; default: throw new AssertionError("No value defined for '" + status + "'!"); } } static RunStatus runStatusOf(String status) { switch (status) { case "running" : return running; case "outOfCapacity" : return outOfCapacity; case "endpointCertificateTimeout" : return endpointCertificateTimeout; case "deploymentFailed" : return deploymentFailed; case "installationFailed" : return installationFailed; case "testFailure" : return testFailure; case "error" : return error; case "success" : return success; case "aborted" : return aborted; default: throw new IllegalArgumentException("No run status defined by '" + status + "'!"); } } }
class RunSerializer { private static final String stepsField = "steps"; private static final String stepDetailsField = "stepDetails"; private static final String startTimeField = "startTime"; private static final String applicationField = "id"; private static final String jobTypeField = "type"; private static final String numberField = "number"; private static final String startField = "start"; private static final String endField = "end"; private static final String statusField = "status"; private static final String versionsField = "versions"; private static final String platformVersionField = "platform"; private static final String repositoryField = "repository"; private static final String branchField = "branch"; private static final String commitField = "commit"; private static final String authorEmailField = "authorEmail"; private static final String compileVersionField = "compileVersion"; private static final String buildTimeField = "buildTime"; private static final String sourceUrlField = "sourceUrl"; private static final String buildField = "build"; private static final String sourceField = "source"; private static final String lastTestRecordField = "lastTestRecord"; private static final String lastVespaLogTimestampField = "lastVespaLogTimestamp"; private static final String noNodesDownSinceField = "noNodesDownSince"; private static final String oldConvergenceSummaryField = "convergenceSummary"; private static final String convergenceSummaryField = "convergenceSummaryV2"; private static final String testerCertificateField = "testerCertificate"; Run runFromSlime(Slime slime) { return runFromSlime(slime.get()); } NavigableMap<RunId, Run> runsFromSlime(Slime slime) { NavigableMap<RunId, Run> runs = new TreeMap<>(comparing(RunId::number)); Inspector runArray = slime.get(); runArray.traverse((ArrayTraverser) (__, runObject) -> { Run run = runFromSlime(runObject); runs.put(run.id(), run); }); return runs; } private Run runFromSlime(Inspector runObject) { var 
steps = new EnumMap<Step, StepInfo>(Step.class); Inspector detailsField = runObject.field(stepDetailsField); runObject.field(stepsField).traverse((ObjectTraverser) (step, status) -> { Step typedStep = stepOf(step); Inspector stepDetailsField = detailsField.field(step); Inspector startTimeValue = stepDetailsField.field(startTimeField); Optional<Instant> startTime = Serializers.optionalInstant(startTimeValue); steps.put(typedStep, new StepInfo(typedStep, stepStatusOf(status.asString()), startTime)); }); return new Run(new RunId(ApplicationId.fromSerializedForm(runObject.field(applicationField).asString()), JobType.fromJobName(runObject.field(jobTypeField).asString()), runObject.field(numberField).asLong()), steps, versionsFromSlime(runObject.field(versionsField)), Serializers.instant(runObject.field(startField)), Serializers.optionalInstant(runObject.field(endField)), runStatusOf(runObject.field(statusField).asString()), runObject.field(lastTestRecordField).asLong(), Instant.EPOCH.plus(runObject.field(lastVespaLogTimestampField).asLong(), ChronoUnit.MICROS), Serializers.optionalInstant(runObject.field(noNodesDownSinceField)), convergenceSummaryFrom(runObject.field(convergenceSummaryField)) .or(() ->convergenceSummaryFrom(runObject.field(oldConvergenceSummaryField))), Optional.of(runObject.field(testerCertificateField)) .filter(Inspector::valid) .map(certificate -> X509CertificateUtils.fromPem(certificate.asString()))); } private Versions versionsFromSlime(Inspector versionsObject) { Version targetPlatformVersion = Version.fromString(versionsObject.field(platformVersionField).asString()); ApplicationVersion targetApplicationVersion = applicationVersionFrom(versionsObject); Optional<Version> sourcePlatformVersion = versionsObject.field(sourceField).valid() ? 
Optional.of(Version.fromString(versionsObject.field(sourceField).field(platformVersionField).asString())) : Optional.empty(); Optional<ApplicationVersion> sourceApplicationVersion = versionsObject.field(sourceField).valid() ? Optional.of(applicationVersionFrom(versionsObject.field(sourceField))) : Optional.empty(); return new Versions(targetPlatformVersion, targetApplicationVersion, sourcePlatformVersion, sourceApplicationVersion); } private ApplicationVersion applicationVersionFrom(Inspector versionObject) { if ( ! versionObject.field(buildField).valid()) return ApplicationVersion.unknown; long buildNumber = versionObject.field(buildField).asLong(); Optional<SourceRevision> source = Optional.of(new SourceRevision(versionObject.field(repositoryField).asString(), versionObject.field(branchField).asString(), versionObject.field(commitField).asString())) .filter(revision -> ! revision.commit().isBlank() && ! revision.repository().isBlank() && ! revision.branch().isBlank()); Optional<String> authorEmail = Serializers.optionalString(versionObject.field(authorEmailField)); Optional<Version> compileVersion = Serializers.optionalString(versionObject.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = Serializers.optionalInstant(versionObject.field(buildTimeField)); Optional<String> sourceUrl = Serializers.optionalString(versionObject.field(sourceUrlField)); Optional<String> commit = Serializers.optionalString(versionObject.field(commitField)); return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, compileVersion, buildTime, sourceUrl, commit); } Slime toSlime(Iterable<Run> runs) { Slime slime = new Slime(); Cursor runArray = slime.setArray(); runs.forEach(run -> toSlime(run, runArray.addObject())); return slime; } Slime toSlime(Run run) { Slime slime = new Slime(); toSlime(run, slime.setObject()); return slime; } private void toSlime(Run run, Cursor runObject) { runObject.setString(applicationField, 
run.id().application().serializedForm()); runObject.setString(jobTypeField, run.id().type().jobName()); runObject.setLong(numberField, run.id().number()); runObject.setLong(startField, run.start().toEpochMilli()); run.end().ifPresent(end -> runObject.setLong(endField, end.toEpochMilli())); runObject.setString(statusField, valueOf(run.status())); runObject.setLong(lastTestRecordField, run.lastTestLogEntry()); runObject.setLong(lastVespaLogTimestampField, Instant.EPOCH.until(run.lastVespaLogTimestamp(), ChronoUnit.MICROS)); run.noNodesDownSince().ifPresent(noNodesDownSince -> runObject.setLong(noNodesDownSinceField, noNodesDownSince.toEpochMilli())); run.convergenceSummary().ifPresent(convergenceSummary -> { toSlime(convergenceSummary, runObject.setArray(convergenceSummaryField), false); toSlime(convergenceSummary, runObject.setArray(oldConvergenceSummaryField), true); }); run.testerCertificate().ifPresent(certificate -> runObject.setString(testerCertificateField, X509CertificateUtils.toPem(certificate))); Cursor stepsObject = runObject.setObject(stepsField); run.steps().forEach((step, statusInfo) -> stepsObject.setString(valueOf(step), valueOf(statusInfo.status()))); Cursor stepDetailsObject = runObject.setObject(stepDetailsField); run.steps().forEach((step, statusInfo) -> statusInfo.startTime().ifPresent(startTime -> stepDetailsObject.setObject(valueOf(step)).setLong(startTimeField, valueOf(startTime)))); Cursor versionsObject = runObject.setObject(versionsField); toSlime(run.versions().targetPlatform(), run.versions().targetApplication(), versionsObject); run.versions().sourcePlatform().ifPresent(sourcePlatformVersion -> { toSlime(sourcePlatformVersion, run.versions().sourceApplication() .orElseThrow(() -> new IllegalArgumentException("Source versions must be both present or absent.")), versionsObject.setObject(sourceField)); }); } private void toSlime(Version platformVersion, ApplicationVersion applicationVersion, Cursor versionsObject) { 
versionsObject.setString(platformVersionField, platformVersion.toString()); applicationVersion.buildNumber().ifPresent(number -> versionsObject.setLong(buildField, number)); applicationVersion.source().map(SourceRevision::repository).ifPresent(repository -> versionsObject.setString(repositoryField, repository)); applicationVersion.source().map(SourceRevision::branch).ifPresent(branch -> versionsObject.setString(branchField, branch)); applicationVersion.source().map(SourceRevision::commit).ifPresent(commit -> versionsObject.setString(commitField, commit)); applicationVersion.authorEmail().ifPresent(email -> versionsObject.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> versionsObject.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> versionsObject.setLong(buildTimeField, time.toEpochMilli())); applicationVersion.sourceUrl().ifPresent(url -> versionsObject.setString(sourceUrlField, url)); applicationVersion.commit().ifPresent(commit -> versionsObject.setString(commitField, commit)); } private void toSlime(ConvergenceSummary summary, Cursor summaryArray, boolean oldFormat) { summaryArray.addLong(summary.nodes()); summaryArray.addLong(summary.down()); summaryArray.addLong(summary.upgradingOs()); summaryArray.addLong(summary.upgradingFirmware()); summaryArray.addLong(summary.needPlatformUpgrade()); summaryArray.addLong(summary.upgradingPlatform()); summaryArray.addLong(summary.needReboot()); summaryArray.addLong(summary.rebooting()); summaryArray.addLong(summary.needRestart()); summaryArray.addLong(summary.restarting()); summaryArray.addLong(summary.services()); summaryArray.addLong(summary.needNewConfig()); if (!oldFormat) summaryArray.addLong(summary.retiring()); } static String valueOf(Step step) { switch (step) { case deployInitialReal : return "deployInitialReal"; case installInitialReal : return "installInitialReal"; case deployReal : return "deployReal"; case 
installReal : return "installReal"; case deactivateReal : return "deactivateReal"; case deployTester : return "deployTester"; case installTester : return "installTester"; case deactivateTester : return "deactivateTester"; case copyVespaLogs : return "copyVespaLogs"; case startStagingSetup : return "startStagingSetup"; case endStagingSetup : return "endStagingSetup"; case startTests : return "startTests"; case endTests : return "endTests"; case report : return "report"; default: throw new AssertionError("No value defined for '" + step + "'!"); } } static Step stepOf(String step) { switch (step) { case "deployInitialReal" : return deployInitialReal; case "installInitialReal" : return installInitialReal; case "deployReal" : return deployReal; case "installReal" : return installReal; case "deactivateReal" : return deactivateReal; case "deployTester" : return deployTester; case "installTester" : return installTester; case "deactivateTester" : return deactivateTester; case "copyVespaLogs" : return copyVespaLogs; case "startStagingSetup" : return startStagingSetup; case "endStagingSetup" : return endStagingSetup; case "startTests" : return startTests; case "endTests" : return endTests; case "report" : return report; default: throw new IllegalArgumentException("No step defined by '" + step + "'!"); } } static String valueOf(Status status) { switch (status) { case unfinished : return "unfinished"; case failed : return "failed"; case succeeded : return "succeeded"; default: throw new AssertionError("No value defined for '" + status + "'!"); } } static Status stepStatusOf(String status) { switch (status) { case "unfinished" : return unfinished; case "failed" : return failed; case "succeeded" : return succeeded; default: throw new IllegalArgumentException("No status defined by '" + status + "'!"); } } static Long valueOf(Instant instant) { return instant.toEpochMilli(); } static String valueOf(RunStatus status) { switch (status) { case running : return "running"; case 
outOfCapacity : return "outOfCapacity"; case endpointCertificateTimeout : return "endpointCertificateTimeout"; case deploymentFailed : return "deploymentFailed"; case installationFailed : return "installationFailed"; case testFailure : return "testFailure"; case error : return "error"; case success : return "success"; case aborted : return "aborted"; default: throw new AssertionError("No value defined for '" + status + "'!"); } } static RunStatus runStatusOf(String status) { switch (status) { case "running" : return running; case "outOfCapacity" : return outOfCapacity; case "endpointCertificateTimeout" : return endpointCertificateTimeout; case "deploymentFailed" : return deploymentFailed; case "installationFailed" : return installationFailed; case "testFailure" : return testFailure; case "error" : return error; case "success" : return success; case "aborted" : return aborted; default: throw new IllegalArgumentException("No run status defined by '" + status + "'!"); } } }
Consider an empty description, which would mean it would be absent in the report.
private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) { Optional<NodeMutex> nodeMutex = lockAndGet(hostname); if (nodeMutex.isEmpty()) return List.of(); Node host = nodeMutex.get().node(); if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host); List<Node> result; boolean wantToDeprovision = op == DecommissionOperation.deprovision; boolean wantToRebuild = op == DecommissionOperation.rebuild; try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) { host = lock.node(); result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> write(node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant), nodeLock)); Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); if (op == DecommissionOperation.encrypt) { Report report = Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, "Host should be encrypted"); newHost = newHost.with(newHost.reports().withReport(report)); } result.add(write(newHost, lock)); } return result; }
Report report = Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, "Host should be encrypted");
private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) { Optional<NodeMutex> nodeMutex = lockAndGet(hostname); if (nodeMutex.isEmpty()) return List.of(); Node host = nodeMutex.get().node(); if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host); List<Node> result; boolean wantToDeprovision = op == DecommissionOperation.deprovision; boolean wantToRebuild = op == DecommissionOperation.rebuild; try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) { host = lock.node(); result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> write(node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant), nodeLock)); Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); if (op == DecommissionOperation.encrypt) { Report report = Report.basicReport(Report.WANT_TO_ENCRYPT_ID, Report.Type.UNSPECIFIED, instant, ""); newHost = newHost.with(newHost.reports().withReport(report)); } result.add(write(newHost, lock)); } return result; }
class Nodes { private static final Logger log = Logger.getLogger(Nodes.class.getName()); private final Zone zone; private final Clock clock; private final CuratorDatabaseClient db; public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) { this.zone = zone; this.clock = clock; this.db = db; } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ public void rewrite() { Instant start = clock.instant(); int nodesWritten = 0; for (Node.State state : Node.State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> node(String hostname, Node.State... inState) { return db.readNode(hostname, inState); } /** * Returns a list of nodes in this repository in any of the given states * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned */ public NodeList list(Node.State... inState) { return NodeList.copyOf(db.readNodes(inState)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(list().asList(), lock); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. 
*/ public boolean isWorking() { NodeList activeNodes = list(Node.State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created reserved nodes to the node repository */ public List<Node> addReservedNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) illegal("Cannot add " + node + ": This is not a child node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Child nodes need to be allocated"); Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as provisioned nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != Node.State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } NestedTransaction transaction = new NestedTransaction(); List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction); db.removeNodes(nodesToRemove, transaction); transaction.commit(); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty) illegal("Can not set " + node + " ready. 
It is not provisioned or dirty."); return node.withWantToRetire(false, false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = requireNode(hostname); if (nodeToReady.state() == Node.State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream() .map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. 
*/ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) { return fail(nodes, Agent.application, "Failed by application", transaction.nested()); } public List<Node> fail(List<Node> nodes, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); nodes = fail(nodes, agent, reason, transaction); transaction.commit();; return nodes; } private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { nodes = nodes.stream() .map(n -> n.withWantToFail(false, agent, clock.instant())) .collect(Collectors.toList()); return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != Node.State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != Node.State.provisioned) .filter(node -> node.state() != Node.State.failed) .filter(node -> node.state() != Node.State.parked) .filter(node -> node.state() != Node.State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. */ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (parkOnDeallocationOf(node, agent)) return park(node.hostname(), false, agent, reason, transaction); else return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return fail(hostname, true, agent, reason); } public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * Non-active nodes are failed immediately, while active nodes are marked as wantToFail. * The host is failed if it has no active nodes and marked wantToFail if it has. * * @return all the nodes that were changed by this request */ public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) { NodeList children = list().childrenOf(hostname); List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock)); if (children.state(Node.State.active).isEmpty()) changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason))); else changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock))); return changed; } private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) { if (node.state() == Node.State.active) { node = node.withWantToFail(true, agent, clock.instant()); write(node, lock); return node; } else { return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason)); } } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, Node.State.active, agent, true, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); NestedTransaction transaction = new NestedTransaction(); List<Node> removed = removeChildren(node, false, transaction); removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction)); transaction.commit(); return removed; } } private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child.hostname(), toState, agent, true, reason, transaction)) .collect(Collectors.toList()); moved.add(move(hostname, toState, agent, true, reason, transaction)); transaction.commit(); return moved; } /** Move a node to given state */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction); transaction.commit(); return moved; } /** Move a node to given state as part of a transaction */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) { try (NodeMutex lock = lockAndGetRequired(hostname)) { Node node = lock.node(); if (toState == Node.State.active) { if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation"); if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation"); for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && 
node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } if (toState == Node.State.deprovisioned) { node = node.with(IP.Config.EMPTY); } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For Linux * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != Node.State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == Node.State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::node).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = requireNode(hostname); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); NestedTransaction transaction = new NestedTransaction(); final List<Node> removed; if (!node.type().isHost()) { removed = List.of(node); db.removeNodes(removed, transaction); } else { removed = removeChildren(node, force, transaction); if (zone.getCloud().dynamicProvisioning()) { db.removeNodes(List.of(node), transaction); } else { move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction); } removed.add(node); } transaction.commit(); return removed; } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != Node.State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); NestedTransaction transaction = new NestedTransaction(); db.removeNodes(List.of(node), transaction); transaction.commit(); } private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children, transaction); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: * - non-recursively: node is unallocated * - recursively: node is unallocated or node is in failed|parked * - Host node: iff in state provisioned|failed|parked * - Child node: * - non-recursively: node in state ready * - recursively: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingRecursively, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) { EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked); if (!removingRecursively || !removableStates.contains(node.state())) illegal(node + " is currently allocated and cannot be removed while in " + node.state()); } final Set<Node.State> removableStates; if (node.type().isHost()) { removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked); } else { removableStates = removingRecursively ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready) : EnumSet.of(Node.State.ready); } if (!removableStates.contains(node.state())) illegal(node + " can not be removed while in " + node.state()); } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone.getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. 
* * @return the nodes in their new state */ public List<Node> restart(Predicate<Node> filter) { return performOn(StateFilter.from(Node.State.active).and(filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** Retire and deprovision given host and all of its children */ public List<Node> deprovision(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.deprovision, agent, instant); } /** Retire and rebuild given host and all of its children */ public List<Node> rebuild(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.rebuild, agent, instant); } /** Retire and encrypt given host and all of its children */ public List<Node> encrypt(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.encrypt, agent, instant); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) { return performOn(list().matching(filter), action); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : nodes) { if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) 
continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state()); else return host.state() == Node.State.active; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. 
*/ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = node(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'")); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private Mutex lock(Node node) { return node.allocation().isPresent() ? 
lock(node.allocation().get().owner()) : lockUnallocated(); } private Node requireNode(String hostname) { return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private void illegal(String message) { throw new IllegalArgumentException(message); } /** Returns whether node should be parked when deallocated by given agent */ private static boolean parkOnDeallocationOf(Node node, Agent agent) { if (node.state() == Node.State.parked) return false; if (agent == Agent.operator) return false; boolean retirementRequestedByOperator = node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(a -> a == Agent.operator) .orElse(false); return node.status().wantToDeprovision() || node.status().wantToRebuild() || retirementRequestedByOperator; } /** The different ways a host can be decommissioned */ private enum DecommissionOperation { deprovision, rebuild, encrypt, } }
class Nodes { private static final Logger log = Logger.getLogger(Nodes.class.getName()); private final Zone zone; private final Clock clock; private final CuratorDatabaseClient db; public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock) { this.zone = zone; this.clock = clock; this.db = db; } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ public void rewrite() { Instant start = clock.instant(); int nodesWritten = 0; for (Node.State state : Node.State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> node(String hostname, Node.State... inState) { return db.readNode(hostname, inState); } /** * Returns a list of nodes in this repository in any of the given states * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned */ public NodeList list(Node.State... inState) { return NodeList.copyOf(db.readNodes(inState)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(list().asList(), lock); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. 
*/ public boolean isWorking() { NodeList activeNodes = list(Node.State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created reserved nodes to the node repository */ public List<Node> addReservedNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) illegal("Cannot add " + node + ": This is not a child node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Child nodes need to be allocated"); Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as provisioned nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != Node.State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } NestedTransaction transaction = new NestedTransaction(); List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction); db.removeNodes(nodesToRemove, transaction); transaction.commit(); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty) illegal("Can not set " + node + " ready. 
It is not provisioned or dirty."); return node.withWantToRetire(false, false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = requireNode(hostname); if (nodeToReady.state() == Node.State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream() .map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. 
*/ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) { return fail(nodes, Agent.application, "Failed by application", transaction.nested()); } public List<Node> fail(List<Node> nodes, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); nodes = fail(nodes, agent, reason, transaction); transaction.commit();; return nodes; } private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { nodes = nodes.stream() .map(n -> n.withWantToFail(false, agent, clock.instant())) .collect(Collectors.toList()); return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != Node.State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != Node.State.provisioned) .filter(node -> node.state() != Node.State.failed) .filter(node -> node.state() != Node.State.parked) .filter(node -> node.state() != Node.State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. */ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (parkOnDeallocationOf(node, agent)) return park(node.hostname(), false, agent, reason, transaction); else return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return fail(hostname, true, agent, reason); } public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * Non-active nodes are failed immediately, while active nodes are marked as wantToFail. * The host is failed if it has no active nodes and marked wantToFail if it has. * * @return all the nodes that were changed by this request */ public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) { NodeList children = list().childrenOf(hostname); List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock)); if (children.state(Node.State.active).isEmpty()) changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason))); else changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock))); return changed; } private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) { if (node.state() == Node.State.active) { node = node.withWantToFail(true, agent, clock.instant()); write(node, lock); return node; } else { return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason)); } } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, Node.State.active, agent, true, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); NestedTransaction transaction = new NestedTransaction(); List<Node> removed = removeChildren(node, false, transaction); removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction)); transaction.commit(); return removed; } } private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child.hostname(), toState, agent, true, reason, transaction)) .collect(Collectors.toList()); moved.add(move(hostname, toState, agent, true, reason, transaction)); transaction.commit(); return moved; } /** Move a node to given state */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction); transaction.commit(); return moved; } /** Move a node to given state as part of a transaction */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) { try (NodeMutex lock = lockAndGetRequired(hostname)) { Node node = lock.node(); if (toState == Node.State.active) { if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation"); if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation"); for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && 
node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } if (toState == Node.State.deprovisioned) { node = node.with(IP.Config.EMPTY); } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For Linux * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != Node.State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == Node.State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::node).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = requireNode(hostname); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); NestedTransaction transaction = new NestedTransaction(); final List<Node> removed; if (!node.type().isHost()) { removed = List.of(node); db.removeNodes(removed, transaction); } else { removed = removeChildren(node, force, transaction); if (zone.getCloud().dynamicProvisioning()) { db.removeNodes(List.of(node), transaction); } else { move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction); } removed.add(node); } transaction.commit(); return removed; } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != Node.State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); NestedTransaction transaction = new NestedTransaction(); db.removeNodes(List.of(node), transaction); transaction.commit(); } private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children, transaction); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: * - non-recursively: node is unallocated * - recursively: node is unallocated or node is in failed|parked * - Host node: iff in state provisioned|failed|parked * - Child node: * - non-recursively: node in state ready * - recursively: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingRecursively, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) { EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked); if (!removingRecursively || !removableStates.contains(node.state())) illegal(node + " is currently allocated and cannot be removed while in " + node.state()); } final Set<Node.State> removableStates; if (node.type().isHost()) { removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked); } else { removableStates = removingRecursively ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready) : EnumSet.of(Node.State.ready); } if (!removableStates.contains(node.state())) illegal(node + " can not be removed while in " + node.state()); } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone.getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. 
* * @return the nodes in their new state */ public List<Node> restart(Predicate<Node> filter) { return performOn(StateFilter.from(Node.State.active).and(filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** Retire and deprovision given host and all of its children */ public List<Node> deprovision(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.deprovision, agent, instant); } /** Retire and rebuild given host and all of its children */ public List<Node> rebuild(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.rebuild, agent, instant); } /** Retire and encrypt given host and all of its children */ public List<Node> encrypt(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.encrypt, agent, instant); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) { return performOn(list().matching(filter), action); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : nodes) { if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) 
continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state()); else return host.state() == Node.State.active; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. 
*/ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = node(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'")); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private Mutex lock(Node node) { return node.allocation().isPresent() ? 
lock(node.allocation().get().owner()) : lockUnallocated(); } private Node requireNode(String hostname) { return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private void illegal(String message) { throw new IllegalArgumentException(message); } /** Returns whether node should be parked when deallocated by given agent */ private static boolean parkOnDeallocationOf(Node node, Agent agent) { if (node.state() == Node.State.parked) return false; if (agent == Agent.operator) return false; boolean retirementRequestedByOperator = node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(a -> a == Agent.operator) .orElse(false); return node.status().wantToDeprovision() || node.status().wantToRebuild() || retirementRequestedByOperator; } /** The different ways a host can be decommissioned */ private enum DecommissionOperation { deprovision, rebuild, encrypt, } }
```suggestion "' defined twice with identical expression (illegal and will be enforced soon) '" + expression.getFileName() + "'."); ```
public void add(RankExpressionFile expression, DeployLogger deployLogger) { expression.validate(); String name = expression.getName(); if (expressions.containsKey(name)) { if ( expressions.get(name).getFileName().equals(expression.getFileName()) ) { deployLogger.logApplicationPackage(Level.WARNING, "Rank expression file '" + name + "' defined twice with identical expression (illegal and will be enfoced soon) '" + expression.getFileName() + "'."); } else { throw new IllegalArgumentException("Rank expression file '" + name + "' defined twice (illegal but not enforced), but redefinition is not matching (illegal and enforced), " + "previous = '" + expressions.get(name).getFileName() + "', new = '" + expression.getFileName() + "'."); } } expressions.put(name, expression); }
"' defined twice with identical expression (illegal and will be enfoced soon) '" + expression.getFileName() + "'.");
public void add(RankExpressionFile expression, DeployLogger deployLogger) { expression.validate(); String name = expression.getName(); if (expressions.containsKey(name)) { if ( expressions.get(name).getFileName().equals(expression.getFileName()) ) { deployLogger.logApplicationPackage(Level.WARNING, "Rank expression file '" + name + "' defined twice with identical expression (illegal and will be enforced soon) '" + expression.getFileName() + "'."); } else { throw new IllegalArgumentException("Rank expression file '" + name + "' defined twice (illegal but not enforced), but redefinition is not matching (illegal and enforced), " + "previous = '" + expressions.get(name).getFileName() + "', new = '" + expression.getFileName() + "'."); } } expressions.put(name, expression); }
class RankExpressionFiles { private final Map<String, RankExpressionFile> expressions = new HashMap<>(); /** Returns the ranking constant with the given name, or null if not present */ public RankExpressionFile get(String name) { return expressions.get(name); } /** Returns a read-only map of the ranking constants in this indexed by name */ public Map<String, RankExpressionFile> asMap() { return Collections.unmodifiableMap(expressions); } /** Initiate sending of these constants to some services over file distribution */ public void sendTo(Collection<? extends AbstractService> services) { expressions.values().forEach(constant -> constant.sendTo(services)); } }
class RankExpressionFiles { private final Map<String, RankExpressionFile> expressions = new HashMap<>(); /** Returns the ranking constant with the given name, or null if not present */ public RankExpressionFile get(String name) { return expressions.get(name); } /** Returns a read-only map of the ranking constants in this indexed by name */ public Map<String, RankExpressionFile> asMap() { return Collections.unmodifiableMap(expressions); } /** Initiate sending of these constants to some services over file distribution */ public void sendTo(Collection<? extends AbstractService> services) { expressions.values().forEach(constant -> constant.sendTo(services)); } }
Buffering already done in the json class, so no need for that here, it will only be overhead.
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException { return new BufferedInputStream( cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get())); }
return new BufferedInputStream(
private InputStream createFeedInputStream(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException { return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get()); }
/**
 * Command line client that parses CLI arguments, builds a feed client from them,
 * and streams a JSON feed from a file or standard input to the configured endpoint.
 */
class CliClient {

    // Injected rather than using System.* directly, so tests can capture output and supply input.
    private final PrintStream systemOut;
    private final PrintStream systemError;
    private final InputStream systemIn;
    private final Properties systemProperties;

    private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn, Properties systemProperties) {
        this.systemOut = systemOut;
        this.systemError = systemError;
        this.systemIn = systemIn;
        this.systemProperties = systemProperties;
    }

    public static void main(String[] args) {
        CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties());
        int exitCode = client.run(args);
        System.exit(exitCode);
    }

    /**
     * Parses the raw arguments and either prints help/version information or feeds
     * the JSON input to the endpoint.
     *
     * @return the process exit code: 0 on success, 1 on failure
     */
    private int run(String[] rawArgs) {
        try {
            CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
            if (cliArgs.helpSpecified()) {
                cliArgs.printHelp(systemOut);
                return 0;
            }
            if (cliArgs.versionSpecified()) {
                systemOut.println(Vespa.VERSION);
                return 0;
            }
            // try-with-resources closes both the input stream and the feeder when feeding completes.
            try (InputStream in = createFeedInputStream(cliArgs);
                 JsonStreamFeeder feeder = createJsonFeeder(cliArgs)) {
                feeder.feed(in);
            }
            return 0;
        } catch (CliArguments.CliArgumentsException | IOException e) {
            return handleException(e);
        }
    }

    /** Builds a FeedClient from the CLI arguments: connection limits, TLS material and extra request headers. */
    private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
        FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
        cliArgs.connections().ifPresent(builder::setMaxConnections);
        // NOTE(review): maxStreamsPerConnection is routed to setMaxConnections, overwriting the
        // value set on the previous line — looks like it should be builder::setMaxStreamsPerConnection.
        // Confirm against the FeedClientBuilder API.
        cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxConnections);
        if (cliArgs.sslHostnameVerificationDisabled()) {
            builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
        }
        // Only build a custom SSL context when at least one of certificate/key or CA certificates is given.
        CliArguments.CertificateAndKey certificateAndKey = cliArgs.certificateAndKey().orElse(null);
        Path caCertificates = cliArgs.caCertificates().orElse(null);
        if (certificateAndKey != null || caCertificates != null) {
            SslContextBuilder sslContextBuilder = new SslContextBuilder();
            if (certificateAndKey != null) {
                sslContextBuilder.withCertificateAndKey(certificateAndKey.certificateFile, certificateAndKey.privateKeyFile);
            }
            if (caCertificates != null) {
                sslContextBuilder.withCaCertificates(caCertificates);
            }
            builder.setSslContext(sslContextBuilder.build());
        }
        cliArgs.headers().forEach(builder::addRequestHeader);
        return builder.build();
    }

    /** Wraps the feed client in a JSON stream feeder configured with timeout, route and trace level. */
    private static JsonStreamFeeder createJsonFeeder(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
        FeedClient feedClient = createFeedClient(cliArgs);
        JsonStreamFeeder.Builder builder = JsonStreamFeeder.builder(feedClient);
        cliArgs.timeout().ifPresent(builder::withTimeout);
        cliArgs.route().ifPresent(builder::withRoute);
        cliArgs.traceLevel().ifPresent(builder::withTracelevel);
        return builder.build();
    }

    private int handleException(Exception e) { return handleException(e.getMessage(), e); }

    /** Prints the message to the error stream (plus the stack trace in debug mode) and returns exit code 1. */
    private int handleException(String message, Exception exception) {
        systemError.println(message);
        if (debugMode()) {
            exception.printStackTrace(systemError);
        }
        return 1;
    }

    // Debug mode is toggled with the VESPA_DEBUG system property.
    private boolean debugMode() {
        return Boolean.parseBoolean(systemProperties.getProperty("VESPA_DEBUG", Boolean.FALSE.toString()));
    }

    /** Hostname verifier accepting all hosts; used when SSL hostname verification is disabled. */
    private static class AcceptAllHostnameVerifier implements HostnameVerifier {

        static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();

        @Override public boolean verify(String hostname, SSLSession session) { return true; }
    }
}
/**
 * Command line client that parses CLI arguments, builds a feed client from them,
 * and either feeds a JSON stream to the configured endpoint or runs it in
 * benchmark mode and prints the result as JSON.
 */
class CliClient {

    // Injected rather than using System.* directly, so tests can capture output and supply input.
    private final PrintStream systemOut;
    private final PrintStream systemError;
    private final InputStream systemIn;
    private final Properties systemProperties;

    private CliClient(PrintStream systemOut, PrintStream systemError, InputStream systemIn, Properties systemProperties) {
        this.systemOut = systemOut;
        this.systemError = systemError;
        this.systemIn = systemIn;
        this.systemProperties = systemProperties;
    }

    public static void main(String[] args) {
        CliClient client = new CliClient(System.out, System.err, System.in, System.getProperties());
        int exitCode = client.run(args);
        System.exit(exitCode);
    }

    /**
     * Parses the raw arguments and either prints help/version information, runs a
     * benchmark, or feeds the JSON input to the endpoint.
     *
     * @return the process exit code: 0 on success, 1 on failure
     */
    private int run(String[] rawArgs) {
        // Declared outside the try so the catch blocks can consult the verbose flag
        // when argument parsing succeeded but a later step failed.
        CliArguments cliArgs = null;
        try {
            cliArgs = CliArguments.fromRawArgs(rawArgs);
            if (cliArgs.helpSpecified()) {
                cliArgs.printHelp(systemOut);
                return 0;
            }
            if (cliArgs.versionSpecified()) {
                systemOut.println(Vespa.VERSION);
                return 0;
            }
            // try-with-resources closes both the input stream and the feeder when feeding completes.
            try (InputStream in = createFeedInputStream(cliArgs);
                 JsonStreamFeeder feeder = createJsonFeeder(cliArgs)) {
                if (cliArgs.benchmarkModeEnabled()) {
                    printBenchmarkResult(feeder.benchmark(in));
                } else {
                    feeder.feed(in);
                }
            }
            return 0;
        } catch (CliArguments.CliArgumentsException | IOException e) {
            boolean verbose = cliArgs != null && cliArgs.verboseSpecified();
            return handleException(verbose, e);
        } catch (Exception e) {
            // Catch-all boundary: anything unexpected still produces a clean error message and exit code.
            boolean verbose = cliArgs != null && cliArgs.verboseSpecified();
            return handleException(verbose, "Unknown failure: " + e.getMessage(), e);
        }
    }

    /** Builds a FeedClient from the CLI arguments: connection limits, TLS material and extra request headers. */
    private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
        FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
        cliArgs.connections().ifPresent(builder::setMaxConnections);
        // NOTE(review): maxStreamsPerConnection is routed to setMaxConnections, overwriting the
        // value set on the previous line — looks like it should be builder::setMaxStreamsPerConnection.
        // Confirm against the FeedClientBuilder API.
        cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxConnections);
        if (cliArgs.sslHostnameVerificationDisabled()) {
            builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
        }
        cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
        cliArgs.caCertificates().ifPresent(builder::setCaCertificates);
        cliArgs.headers().forEach(builder::addRequestHeader);
        return builder.build();
    }

    /** Wraps the feed client in a JSON stream feeder configured with timeout, route and trace level. */
    private static JsonStreamFeeder createJsonFeeder(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
        FeedClient feedClient = createFeedClient(cliArgs);
        JsonStreamFeeder.Builder builder = JsonStreamFeeder.builder(feedClient);
        cliArgs.timeout().ifPresent(builder::withTimeout);
        cliArgs.route().ifPresent(builder::withRoute);
        cliArgs.traceLevel().ifPresent(builder::withTracelevel);
        return builder.build();
    }

    /** Writes the benchmark result to standard out as a pretty-printed JSON object. */
    private void printBenchmarkResult(JsonStreamFeeder.BenchmarkResult result) throws IOException {
        JsonFactory factory = new JsonFactory();
        try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
            generator.writeStartObject();
            generator.writeNumberField("feeder.runtime", result.duration.toMillis());
            generator.writeNumberField("feeder.okcount", result.okCount);
            generator.writeNumberField("feeder.errorcount", result.errorCount);
            generator.writeNumberField("feeder.throughput", result.throughput);
            generator.writeEndObject();
        }
    }

    private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }

    /** Prints the message to the error stream (plus the stack trace in debug or verbose mode) and returns exit code 1. */
    private int handleException(boolean verbose, String message, Exception exception) {
        systemError.println(message);
        if (debugMode() || verbose) {
            exception.printStackTrace(systemError);
        }
        return 1;
    }

    // Debug mode is toggled with the VESPA_DEBUG system property.
    private boolean debugMode() {
        return Boolean.parseBoolean(systemProperties.getProperty("VESPA_DEBUG", Boolean.FALSE.toString()));
    }

    /** Hostname verifier accepting all hosts; used when SSL hostname verification is disabled. */
    private static class AcceptAllHostnameVerifier implements HostnameVerifier {

        static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();

        @Override public boolean verify(String hostname, SSLSession session) { return true; }
    }
}