comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Rename variable? | public Object evaluate(DocumentOperation op) {
DocumentType doct;
if (op instanceof DocumentPut) {
doct = ((DocumentPut)op).getDocument().getDataType();
} else if (op instanceof DocumentUpdate) {
doct = ((DocumentUpdate)op).getDocumentType();
} else if (op instanceof DocumentRemove) {
DocumentRemove removeOp = (DocumentRemove)op;
return (removeOp.getId().getDocType().equals(type) ? op : Boolean.FALSE);
} else if (op instanceof DocumentGet) {
DocumentGet removeOp = (DocumentGet)op;
return (removeOp.getId().getDocType().equals(type) ? op : Boolean.FALSE);
} else {
throw new IllegalStateException("Document class '" + op.getClass().getName() + "' is not supported.");
}
return doct.isA(this.type) ? op : Boolean.FALSE;
} | DocumentGet removeOp = (DocumentGet)op; | public Object evaluate(DocumentOperation op) {
DocumentType doct;
if (op instanceof DocumentPut) {
doct = ((DocumentPut)op).getDocument().getDataType();
} else if (op instanceof DocumentUpdate) {
doct = ((DocumentUpdate)op).getDocumentType();
} else if (op instanceof DocumentRemove) {
DocumentRemove removeOp = (DocumentRemove)op;
return (removeOp.getId().getDocType().equals(type) ? op : Boolean.FALSE);
} else if (op instanceof DocumentGet) {
DocumentGet getOp = (DocumentGet)op;
return (getOp.getId().getDocType().equals(type) ? op : Boolean.FALSE);
} else {
throw new IllegalStateException("Document class '" + op.getClass().getName() + "' is not supported.");
}
return doct.isA(this.type) ? op : Boolean.FALSE;
} | class DocumentNode implements ExpressionNode {
private String type;
public DocumentNode(String type) {
this.type = type;
}
public String getType() {
return type;
}
public DocumentNode setType(String type) {
this.type = type;
return this;
}
@Override
public BucketSet getBucketSet(BucketIdFactory factory) {
return null;
}
@Override
public Object evaluate(Context context) {
return evaluate(context.getDocumentOperation());
}
public void accept(Visitor visitor) {
visitor.visit(this);
}
@Override
public String toString() {
return type;
}
@Override
public OrderingSpecification getOrdering(int order) {
return null;
}
} | class DocumentNode implements ExpressionNode {
private String type;
public DocumentNode(String type) {
this.type = type;
}
public String getType() {
return type;
}
public DocumentNode setType(String type) {
this.type = type;
return this;
}
@Override
public BucketSet getBucketSet(BucketIdFactory factory) {
return null;
}
@Override
public Object evaluate(Context context) {
return evaluate(context.getDocumentOperation());
}
public void accept(Visitor visitor) {
visitor.visit(this);
}
@Override
public String toString() {
return type;
}
@Override
public OrderingSpecification getOrdering(int order) {
return null;
}
} |
Consider `UnsupportedOperationException` instead | public void setCondition(TestAndSetCondition condition) {
throw new IllegalStateException("conditional DocumentGet is not supported");
} | throw new IllegalStateException("conditional DocumentGet is not supported"); | public void setCondition(TestAndSetCondition condition) {
throw new UnsupportedOperationException("conditional DocumentGet is not supported");
} | class DocumentGet extends DocumentOperation {
private final DocumentId docId;
public DocumentGet(DocumentId docId) { this.docId = docId; }
@Override
public DocumentId getId() { return docId; }
@Override
@Override
public String toString() {
return "DocumentGet '" + docId + "'";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof DocumentGet)) return false;
DocumentGet that = (DocumentGet) o;
if (!docId.equals(that.docId)) return false;
return true;
}
@Override
public int hashCode() {
return docId.hashCode();
}
} | class DocumentGet extends DocumentOperation {
private final DocumentId docId;
public DocumentGet(DocumentId docId) { this.docId = docId; }
@Override
public DocumentId getId() { return docId; }
@Override
@Override
public String toString() {
return "DocumentGet '" + docId + "'";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof DocumentGet)) return false;
DocumentGet that = (DocumentGet) o;
if (!docId.equals(that.docId)) return false;
return true;
}
@Override
public int hashCode() {
return docId.hashCode();
}
} |
We agreed on throwing IllegalArgumentException here | private String getJvmOptions(ContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
String jvmOptions = "";
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
deployLogger.log(Level.WARNING, "You have specified both jvm-options='" + jvmOptions + "'" +
" and deprecated jvmargs='" + jvmArgs + "'. 'jvmargs' will be ignored");
}
} else {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
if (incompatibleGCOptions(jvmOptions)) {
deployLogger.log(Level.WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
cluster.setJvmGCOptions(ContainerCluster.CMS);
}
}
return jvmOptions;
} | deployLogger.log(Level.WARNING, "You have specified both jvm-options='" + jvmOptions + "'" + | private String getJvmOptions(ContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
String jvmOptions = "";
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
" and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
}
} else {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
if (incompatibleGCOptions(jvmOptions)) {
deployLogger.log(Level.WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
cluster.setJvmGCOptions(ContainerCluster.CMS);
}
}
return jvmOptions;
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
/**
* Default path to vip status file for container in Hosted Vespa.
*/
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/mediasearch/oor/status.html");
/**
* Path to vip status file for container in Hosted Vespa. Only used if set, else use HOSTED_VESPA_STATUS_FILE
*/
private static final String HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING = "cloudconfig_server__tenant_vip_status_file";
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName("container"), ConfigModelId.fromName("jdisc"));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
app = modelContext.getApplicationPackage();
checkVersion(spec);
this.log = modelContext.getDeployLogger();
ContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
addBundlesForPlatformComponents(cluster);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
private void addBundlesForPlatformComponents(ContainerCluster cluster) {
for (Component<?, ?> component : cluster.getAllComponents()) {
String componentClass = component.model.bundleInstantiationSpec.getClassName();
BundleMapper.getBundlePath(componentClass).
ifPresent(cluster::addPlatformBundle);
}
}
private ContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ContainerCluster>() {
@Override
protected ContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
return new ContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
private void addClusterContent(ContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec);
addHandlers(deployState, cluster, spec);
addRestApis(deployState, spec, cluster);
addServlets(deployState, spec, cluster);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addModelEvaluation(spec, cluster, context);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
addDefaultHandlers(cluster);
addStatusHandlers(cluster, context);
setDefaultMetricConsumerFactory(cluster);
addHttp(deployState, spec, cluster);
addAccessLogs(deployState, cluster, spec);
addRoutingAliases(cluster, spec, deployState.zone().environment());
addNodes(cluster, spec, context);
addClientProviders(deployState, spec, cluster);
addServerProviders(deployState, spec, cluster);
addLegacyFilters(deployState, spec, cluster);
addAthensCopperArgos(cluster, context);
}
private void addSecretStore(ContainerCluster cluster, Element spec) {
Element secretStoreElement = XML.getChild(spec, "secret-store");
if (secretStoreElement != null) {
SecretStore secretStore = new SecretStore();
for (Element group : XML.getChildren(secretStoreElement, "group")) {
secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
}
cluster.setSecretStore(secretStore);
}
}
private void addAthensCopperArgos(ContainerCluster cluster, ConfigModelContext context) {
app.getDeployment().map(DeploymentSpec::fromXml)
.ifPresent(deploymentSpec -> {
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), deploymentSpec);
});
}
private void addRotationProperties(ContainerCluster cluster, Zone zone, Set<Rotation> rotations, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
setRotations(container, rotations, spec.globalServiceId(), cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
return spec.zones().stream()
.anyMatch(declaredZone -> declaredZone.deploysTo(zone.environment(), Optional.of(zone.region())) &&
declaredZone.active());
}
private void setRotations(Container container, Set<Rotation> rotations, Optional<String> globalServiceId, String containerClusterName) {
if ( ! rotations.isEmpty() && globalServiceId.isPresent()) {
if (containerClusterName.equals(globalServiceId.get())) {
container.setProp("rotations", rotations.stream().map(Rotation::getId).collect(Collectors.joining(",")));
}
}
}
private void addRoutingAliases(ContainerCluster cluster, Element spec, Environment environment) {
if (environment != Environment.prod) return;
Element aliases = XML.getChild(spec, "aliases");
for (Element alias : XML.getChildren(aliases, "service-alias")) {
cluster.serviceAliases().add(XML.getValue(alias));
}
for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
cluster.endpointAliases().add(XML.getValue(alias));
}
}
private void addConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec) {
for (Element components : XML.getChildren(spec, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, spec, "component");
}
private void setDefaultMetricConsumerFactory(ContainerCluster cluster) {
cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.STATE_MONITOR);
}
private void addDefaultHandlers(ContainerCluster cluster) {
addDefaultHandlersExceptStatus(cluster);
}
private void addStatusHandlers(ContainerCluster cluster, ConfigModelContext configModelContext) {
if (configModelContext.getDeployState().isHosted()) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
"http:
} else {
cluster.addVipHandler();
}
}
/**
* Intended for use by legacy builders only.
* Will be called during building when using ContainerModelBuilder.
*/
public static void addDefaultHandler_legacyBuilder(ContainerCluster cluster) {
addDefaultHandlersExceptStatus(cluster);
cluster.addVipHandler();
}
private static void addDefaultHandlersExceptStatus(ContainerCluster cluster) {
cluster.addDefaultRootHandler();
cluster.addMetricStateHandler();
cluster.addApplicationStatusHandler();
cluster.addStatisticsHandler();
}
private void addClientProviders(DeployState deployState, Element spec, ContainerCluster cluster) {
for (Element clientSpec: XML.getChildren(spec, "client")) {
cluster.addComponent(new DomClientProviderBuilder().build(deployState, cluster, clientSpec));
}
}
private void addServerProviders(DeployState deployState, Element spec, ContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
private void addLegacyFilters(DeployState deployState, Element spec, ContainerCluster cluster) {
for (Component component : buildLegacyFilters(deployState, cluster, spec)) {
cluster.addComponent(component);
}
}
private List<Component> buildLegacyFilters(DeployState deployState, AbstractConfigProducer ancestor, Element spec) {
List<Component> components = new ArrayList<>();
for (Element node : XML.getChildren(spec, "filter")) {
components.add(new DomFilterBuilder().build(deployState, ancestor, node));
}
return components;
}
private void addAccessLogs(DeployState deployState, ContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
if (accessLogElements.isEmpty() && cluster.getSearch() != null)
cluster.addDefaultSearchAccessLog();
}
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
private void addHttp(DeployState deployState, Element spec, ContainerCluster cluster) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
}
private Http buildHttp(DeployState deployState, ContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
private void addRestApis(DeployState deployState, Element spec, ContainerCluster cluster) {
for (Element restApiElem : XML.getChildren(spec, "rest-api")) {
cluster.addRestApi(
new RestApiBuilder().build(deployState, cluster, restApiElem));
}
}
private void addServlets(DeployState deployState, Element spec, ContainerCluster cluster) {
for (Element servletElem : XML.getChildren(spec, "servlet"))
cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem));
}
private void addDocumentApi(Element spec, ContainerCluster cluster) {
ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec);
if (containerDocumentApi == null) return;
cluster.setDocumentApi(containerDocumentApi);
}
private void addDocproc(DeployState deployState, Element spec, ContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
private void addSearch(DeployState deployState, Element spec, ContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
private void addModelEvaluation(Element spec, ContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
private void addProcessing(DeployState deployState, Element spec, ContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
private ContainerSearch buildSearch(DeployState deployState, ContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
private void addHandlers(DeployState deployState, ContainerCluster cluster, Element spec) {
for (Element component: XML.getChildren(spec, "handler")) {
cluster.addComponent(
new DomHandlerBuilder().build(deployState, cluster, component));
}
}
private void checkVersion(Element spec) {
String version = spec.getAttribute("version");
if ( ! Version.fromString(version).equals(new Version(1))) {
throw new RuntimeException("Expected container version to be 1.0, but got " + version);
}
}
private void addNodes(ContainerCluster cluster, Element spec, ConfigModelContext context) {
if (standaloneBuilder)
addStandaloneNode(cluster);
else
addNodesFromXml(cluster, spec, context);
}
private void addStandaloneNode(ContainerCluster cluster) {
Container container = new Container(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa());
cluster.addContainers(Collections.singleton(container));
}
static boolean incompatibleGCOptions(String jvmargs) {
Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC");
Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS");
return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find());
}
private static String buildJvmGCOptions(Zone zone, String jvmGCOPtions, boolean isHostedVespa) {
if (jvmGCOPtions != null) {
return jvmGCOPtions;
} else if (zone.system() == SystemName.dev) {
return ContainerCluster.G1GC;
} else if (isHostedVespa) {
return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region()))
? ContainerCluster.G1GC : ContainerCluster.CMS;
} else {
return ContainerCluster.CMS;
}
}
private void addNodesFromXml(ContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
Container node = new Container(cluster, "container.0", 0, cluster.isHostedVespa());
HostResource host = allocateSingleNodeHost(cluster, log, containerElement, context);
node.setHostResource(host);
node.initService(context.getDeployLogger());
cluster.addContainers(Collections.singleton(node));
} else {
List<Container> nodes = createNodes(cluster, nodesElement, context);
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
if ( !cluster.getJvmGCOptions().isPresent()) {
String jvmGCOptions = nodesElement.hasAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
? nodesElement.getAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
: null;
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
}
applyRoutingAliasProperties(nodes, cluster);
applyDefaultPreload(nodes, nodesElement);
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
private List<Container> createNodes(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of"))
return createNodesFromContentServiceReference(cluster, nodesElement, context);
else
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
private void applyRoutingAliasProperties(List<Container> result, ContainerCluster cluster) {
if (!cluster.serviceAliases().isEmpty()) {
result.forEach(container -> {
container.setProp("servicealiases", cluster.serviceAliases().stream().collect(Collectors.joining(",")));
});
}
if (!cluster.endpointAliases().isEmpty()) {
result.forEach(container -> {
container.setProp("endpointaliases", cluster.endpointAliases().stream().collect(Collectors.joining(",")));
});
}
}
private void applyMemoryPercentage(ContainerCluster cluster, String memoryPercentage) {
if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
memoryPercentage = memoryPercentage.trim();
if ( ! memoryPercentage.endsWith("%"))
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign");
memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
try {
cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign");
}
}
/** Creates a single host when there is no nodes tag */
private HostResource allocateSingleNodeHost(ContainerCluster cluster, DeployLogger logger, Element containerElement, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.getHostSystem();
if (deployState.isHosted()) {
Optional<HostResource> singleContentHost = getHostResourceFromContentClusters(cluster, containerElement, context);
if (singleContentHost.isPresent()) {
return singleContentHost.get();
}
else {
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
deployState.getWantedNodeVespaVersion(),
false);
Capacity capacity = Capacity.fromNodeCount(1,
Optional.empty(),
false,
! deployState.getProperties().isBootstrap());
return hostSystem.allocateHosts(clusterSpec, capacity, 1, logger).keySet().iterator().next();
}
} else {
return hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC);
}
}
private List<Container> createNodesFromNodeCount(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().getHostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
private List<Container> createNodesFromNodeType(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
context.getDeployState().getWantedNodeVespaVersion(),
false);
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().getHostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), 1, log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
private List<Container> createNodesFromContentServiceReference(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
String referenceId = nodesElement.getAttribute("of");
Element services = servicesRootOf(nodesElement).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
Element referencedService = findChildById(services, referenceId).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
if ( ! referencedService.getTagName().equals("content"))
throw new IllegalArgumentException(cluster + " references service '" + referenceId + "', " +
"but that is not a content service");
Element referencedNodesElement = XML.getChild(referencedService, "nodes");
if (referencedNodesElement == null)
throw new IllegalArgumentException(cluster + " references service '" + referenceId + "' to supply nodes, " +
"but that service has no <nodes> element");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(NodesSpecification.from(new ModelElement(referencedNodesElement), context),
referenceId,
cluster.getRoot().getHostSystem(),
context.getDeployLogger());
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
* This is used in case we are on hosted Vespa and no nodes tag is supplied:
* If there are content clusters this will pick the first host in the first cluster as the container node.
* If there are no content clusters this will return empty (such that the node can be created by the container here).
*/
private Optional<HostResource> getHostResourceFromContentClusters(ContainerCluster cluster, Element containersElement, ConfigModelContext context) {
Optional<Element> services = servicesRootOf(containersElement);
if ( ! services.isPresent())
return Optional.empty();
List<Element> contentServices = XML.getChildren(services.get(), "content");
if ( contentServices.isEmpty() ) return Optional.empty();
Element contentNodesElementOrNull = XML.getChild(contentServices.get(0), "nodes");
NodesSpecification nodesSpec;
if (contentNodesElementOrNull == null)
nodesSpec = NodesSpecification.nonDedicated(1, context);
else
nodesSpec = NodesSpecification.from(new ModelElement(contentNodesElementOrNull), context);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodesSpec,
contentServices.get(0).getAttribute("id"),
cluster.getRoot().getHostSystem(),
context.getDeployLogger());
return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the services element above the given Element, or empty if there is no services element */
private Optional<Element> servicesRootOf(Element element) {
Node parent = element.getParentNode();
if (parent == null) return Optional.empty();
if ( ! (parent instanceof Element)) return Optional.empty();
Element parentElement = (Element)parent;
if (parentElement.getTagName().equals("services")) return Optional.of(parentElement);
return servicesRootOf(parentElement);
}
private List<Container> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ContainerCluster cluster) {
List<Container> nodes = new ArrayList<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
String id = "container." + entry.getValue().index();
Container container = new Container(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa());
container.setHostResource(entry.getKey());
container.initService(deployLogger);
nodes.add(container);
}
return nodes;
}
private List<Container> createNodesFromNodeList(DeployState deployState, ContainerCluster cluster, Element nodesElement) {
List<Container> nodes = new ArrayList<>();
int nodeIndex = 0;
for (Element nodeElem: XML.getChildren(nodesElement, "node")) {
nodes.add(new ContainerServiceBuilder("container." + nodeIndex, nodeIndex).build(deployState, cluster, nodeElem));
nodeIndex++;
}
return nodes;
}
private IllegalArgumentException clusterReferenceNotFoundException(ContainerCluster cluster, String referenceId) {
return new IllegalArgumentException(cluster + " references service '" + referenceId +
"' but this service is not defined");
}
private Optional<Element> findChildById(Element parent, String id) {
for (Element child : XML.getChildren(parent))
if (id.equals(child.getAttribute("id"))) return Optional.of(child);
return Optional.empty();
}
private boolean useCpuSocketAffinity(Element nodesElement) {
if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME))
return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
else
return false;
}
private void applyNodesTagJvmArgs(List<Container> containers, String jvmArgs) {
for (Container container: containers) {
if (container.getAssignedJvmOptions().isEmpty())
container.prependJvmOptions(jvmArgs);
}
}
private void applyDefaultPreload(List<Container> containers, Element nodesElement) {
if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
for (Container container: containers)
container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME));
}
/** Registers the SearchHandler with either the configured or the default server bindings. */
private void addSearchHandler(ContainerCluster cluster, Element searchElement) {
    ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(
            cluster.getSearch().getChains(), "com.yahoo.search.handler.SearchHandler");
    // NOTE(review): the default binding literal below is truncated in this copy of the file
    // ("http: with no closing quote) — restore the full binding URI from version control.
    String[] defaultBindings = {"http:
    for (String binding: serverBindings(searchElement, defaultBindings)) {
        searchHandler.addServerBindings(binding);
    }
    cluster.addComponent(searchHandler);
}

/** Registers the GUI handler component on the cluster. */
private void addGUIHandler(ContainerCluster cluster) {
    Handler<?> guiHandler = new GUIHandler();
    // NOTE(review): binding literal truncated here as well ("http: …) — restore from version control.
    guiHandler.addServerBindings("http:
    cluster.addComponent(guiHandler);
}
/** Returns the configured {@code <binding>} values, or the given defaults when none are configured. */
private String[] serverBindings(Element searchElement, String... defaultBindings) {
    List<Element> bindings = XML.getChildren(searchElement, "binding");
    return bindings.isEmpty() ? defaultBindings : toBindingList(bindings);
}
/**
 * Extracts the trimmed, non-empty text content of each {@code <binding>} element.
 *
 * @param bindingElements the binding elements to read
 * @return the non-empty binding strings, in document order
 */
private String[] toBindingList(List<Element> bindingElements) {
    List<String> result = new ArrayList<>();
    for (Element element: bindingElements) {
        String text = element.getTextContent().trim();
        if (!text.isEmpty())
            result.add(text);
    }
    return result.toArray(new String[0]); // new String[0] is the idiomatic toArray form
}
/** Builds the document-api model from an optional {@code <document-api>} element; null when absent. */
private ContainerDocumentApi buildDocumentApi(ContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
    return new ContainerDocumentApi(cluster, documentApiOptions);
}

/** Builds the docproc model from an optional {@code <document-processing>} element; null when absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null)
        return null;
    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
    // standalone builders do not get message-bus based docproc wiring (last argument)
    return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
/**
 * Expands all {@code <include dir="...">} directives directly under the given element.
 *
 * @throws IllegalArgumentException if includes are present but no application package is set
 */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes == null || includes.isEmpty()) {
        return;
    }
    if (app == null) {
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    }
    for (Element include : includes) {
        addInclude(parentElement, include);
    }
}

/** Imports every child element of every XML file in the include directory into parentElement. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
    for (Element includedFile : includedFiles) {
        List<Element> includedSubElements = XML.getChildren(includedFile);
        for (Element includedSubElement : includedSubElements) {
            // importNode(…, true) deep-copies into parentElement's document before appending
            Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
            parentElement.appendChild(copiedNode);
        }
    }
}
/** Builds and adds a component for each child element of spec with the given tag name. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec, String componentName) {
    for (Element node : XML.getChildren(spec, componentName)) {
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
    }
}

/** Same as addConfiguredComponents, but runs the given validator on each element first. */
private static void validateAndAddConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec, String componentName, Consumer<Element> elementValidator) {
    for (Element node : XML.getChildren(spec, componentName)) {
        elementValidator.accept(node); // throws if the element is not acceptable
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
    }
}
/**
 * Adds an Athenz IdentityProvider component and identity node properties, but only
 * when the deployment spec declares an Athenz domain.
 *
 * @throws RuntimeException if a domain is declared but no service is configured for the zone
 */
private void addIdentityProvider(ContainerCluster cluster,
                                 List<ConfigServerSpec> configServerSpecs,
                                 HostName loadBalancerName,
                                 URI ztsUrl,
                                 String athenzDnsSuffix,
                                 Zone zone,
                                 DeploymentSpec spec) {
    spec.athenzDomain().ifPresent(domain -> {
        AthenzService service = spec.athenzService(zone.environment(), zone.region())
                .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration"));
        // e.g. "<env>-<region>.<athenzDnsSuffix>"
        String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
        IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
        cluster.addComponent(identityProvider);
        cluster.getContainers().forEach(container -> {
            container.setProp("identity.domain", domain.value());
            container.setProp("identity.service", service.value());
        });
    });
}
/**
 * Returns the given load balancer name, or falls back to the first config server's
 * hostname ("unknown" when there are no config servers).
 */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.from(fallback);
}
/**
 * Disallows renderer elements whose id collides with the built-in renderer ids
 * taken from RendererRegistry (the xml and json renderer ids).
 *
 * @throws IllegalArgumentException if the element uses a reserved renderer id
 */
private static void validateRendererElement(Element element) {
    String idAttr = element.getAttribute("id");
    // xmlRendererId / jsonRendererId are class-level constants resolved from RendererRegistry
    if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) {
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
    }
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
/**
 * Default path to vip status file for container in Hosted Vespa.
 */
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/mediasearch/oor/status.html");

/**
 * Path to vip status file for container in Hosted Vespa. Only used if set, else use HOSTED_VESPA_STATUS_FILE
 */
private static final String HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING = "cloudconfig_server__tenant_vip_status_file";

/** Whether the built containers expose network servers. */
public enum Networking { disable, enable }

// Application package; assigned in doBuild and read by include/deployment handling
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
// Derived in the constructor: rpc server off for standalone, http server on only with Networking.enable
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
// Deploy logger; assigned in doBuild and used by node provisioning
protected DeployLogger log;

/** The model ids ("container", "jdisc") this builder handles. */
public static final List<ConfigModelId> configModelIds =
        ImmutableList.of(ConfigModelId.fromName("container"), ConfigModelId.fromName("jdisc"));

// Reserved renderer ids, used by validateRendererElement
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * Creates a builder.
 *
 * @param standaloneBuilder whether this builds a standalone container (disables the rpc server)
 * @param networking        whether network servers should be enabled on the built cluster
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
    super(ContainerModel.class);
    this.standaloneBuilder = standaloneBuilder;
    this.networking = networking;
    this.rpcServerEnabled = !standaloneBuilder;
    this.httpServerEnabled = networking == Networking.enable;
}

/** Returns the XML element ids this builder handles ("container", "jdisc"). */
@Override
public List<ConfigModelId> handlesElements() {
    return configModelIds;
}
/**
 * Builds the container cluster from the XML spec and attaches it to the model.
 * Note: stores the application package and deploy logger in builder fields which
 * later build steps read, so the statement order here matters.
 */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
    app = modelContext.getApplicationPackage();
    checkVersion(spec); // fails fast on unsupported "version" attribute
    this.log = modelContext.getDeployLogger();
    ContainerCluster cluster = createContainerCluster(spec, modelContext);
    addClusterContent(cluster, spec, modelContext);
    addBundlesForPlatformComponents(cluster);
    cluster.setRpcServerEnabled(rpcServerEnabled);
    cluster.setHttpServerEnabled(httpServerEnabled);
    model.setCluster(cluster);
}
/** Registers the platform bundle (when one is mapped) for every component class in the cluster. */
private void addBundlesForPlatformComponents(ContainerCluster cluster) {
    for (Component<?, ?> component : cluster.getAllComponents()) {
        String className = component.model.bundleInstantiationSpec.getClassName();
        BundleMapper.getBundlePath(className).ifPresent(cluster::addPlatformBundle);
    }
}
/** Creates the ContainerCluster config producer via an anonymous dom-builder adapter. */
private ContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
    return new VespaDomBuilder.DomConfigProducerBuilder<ContainerCluster>() {
        @Override
        protected ContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
            // Producer id is used both as subId and as cluster name
            return new ContainerCluster(ancestor, modelContext.getProducerId(),
                                        modelContext.getProducerId(), deployState);
        }
    }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the XML spec. The steps below are order-sensitive
 * (e.g. handlers/search before the default/status handlers, nodes before
 * identity/rotation wiring in addAthensCopperArgos) — do not reorder casually.
 */
private void addClusterContent(ContainerCluster cluster, Element spec, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
    // Components and stores
    addConfiguredComponents(deployState, cluster, spec);
    addSecretStore(cluster, spec);
    // Request handling: handlers, rest apis, servlets, processing/search/docproc chains
    addHandlers(deployState, cluster, spec);
    addRestApis(deployState, spec, cluster);
    addServlets(deployState, spec, cluster);
    addProcessing(deployState, spec, cluster);
    addSearch(deployState, spec, cluster);
    addModelEvaluation(spec, cluster, context);
    addDocproc(deployState, spec, cluster);
    addDocumentApi(spec, cluster);
    // Built-in handlers and monitoring
    addDefaultHandlers(cluster);
    addStatusHandlers(cluster, context);
    setDefaultMetricConsumerFactory(cluster);
    // Network, logging, nodes and filters
    addHttp(deployState, spec, cluster);
    addAccessLogs(deployState, cluster, spec);
    addRoutingAliases(cluster, spec, deployState.zone().environment());
    addNodes(cluster, spec, context);
    addClientProviders(deployState, spec, cluster);
    addServerProviders(deployState, spec, cluster);
    addLegacyFilters(deployState, spec, cluster);
    addAthensCopperArgos(cluster, context); // requires nodes to exist
}
/** Reads an optional {@code <secret-store>} element and installs its groups on the cluster. */
private void addSecretStore(ContainerCluster cluster, Element spec) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;

    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
/**
 * When the application package carries a deployment spec, wires up the Athenz
 * identity provider and rotation properties for this cluster.
 */
private void addAthensCopperArgos(ContainerCluster cluster, ConfigModelContext context) {
    app.getDeployment().map(DeploymentSpec::fromXml)
       .ifPresent(deploymentSpec -> {
           addIdentityProvider(cluster,
                               context.getDeployState().getProperties().configServerSpecs(),
                               context.getDeployState().getProperties().loadBalancerName(),
                               context.getDeployState().getProperties().ztsUrl(),
                               context.getDeployState().getProperties().athenzDnsSuffix(),
                               context.getDeployState().zone(),
                               deploymentSpec);
           addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), deploymentSpec);
       });
}

/** Sets the "rotations" and "activeRotation" node properties on every container. */
private void addRotationProperties(ContainerCluster cluster, Zone zone, Set<Rotation> rotations, DeploymentSpec spec) {
    cluster.getContainers().forEach(container -> {
        setRotations(container, rotations, spec.globalServiceId(), cluster.getName());
        container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
    });
}
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
return spec.zones().stream()
.anyMatch(declaredZone -> declaredZone.deploysTo(zone.environment(), Optional.of(zone.region())) &&
declaredZone.active());
}
/**
 * Sets the comma-separated "rotations" property on the container, but only when
 * rotations exist and this cluster is the one named by the global service id.
 */
private void setRotations(Container container, Set<Rotation> rotations, Optional<String> globalServiceId, String containerClusterName) {
    // Flat guard clauses replace the original nested ifs; behavior is unchanged.
    if (rotations.isEmpty()) return;
    if ( ! globalServiceId.isPresent()) return;
    if ( ! containerClusterName.equals(globalServiceId.get())) return;
    container.setProp("rotations", rotations.stream().map(Rotation::getId).collect(Collectors.joining(",")));
}
/** Collects service/endpoint aliases from an optional {@code <aliases>} element; prod zones only. */
private void addRoutingAliases(ContainerCluster cluster, Element spec, Environment environment) {
    if (environment != Environment.prod) return;

    // NOTE(review): aliases is null when no <aliases> element exists; this assumes
    // XML.getChildren tolerates a null parent and returns an empty list — confirm.
    Element aliases = XML.getChild(spec, "aliases");
    for (Element alias : XML.getChildren(aliases, "service-alias")) {
        cluster.serviceAliases().add(XML.getValue(alias));
    }
    for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
        cluster.endpointAliases().add(XML.getValue(alias));
    }
}
/** Adds {@code <component>} children found directly under spec and under any {@code <components>} group. */
private void addConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec) {
    for (Element components : XML.getChildren(spec, "components")) {
        addIncludes(components); // expand <include> directives inside the group first
        addConfiguredComponents(deployState, cluster, components, "component");
    }
    addConfiguredComponents(deployState, cluster, spec, "component");
}

/** Uses the state monitor as the default metric consumer factory. */
private void setDefaultMetricConsumerFactory(ContainerCluster cluster) {
    cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.STATE_MONITOR);
}

/** Adds the standard built-in handlers (the vip/status handler is added separately). */
private void addDefaultHandlers(ContainerCluster cluster) {
    addDefaultHandlersExceptStatus(cluster);
}
/**
 * Adds the vip/status handler: on hosted Vespa a file-backed status handler (path
 * overridable via an install setting), otherwise the plain vip handler.
 */
private void addStatusHandlers(ContainerCluster cluster, ConfigModelContext configModelContext) {
    if (configModelContext.getDeployState().isHosted()) {
        String name = "status.html";
        // Environment override wins over the default status file path
        Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING));
        // NOTE(review): the binding literal below is truncated in this copy of the file
        // ("http: with no closing quote) — restore the full binding URI from version control.
        cluster.addComponent(
                new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
                "http:
    } else {
        cluster.addVipHandler();
    }
}

/**
 * Intended for use by legacy builders only.
 * Will be called during building when using ContainerModelBuilder.
 */
public static void addDefaultHandler_legacyBuilder(ContainerCluster cluster) {
    addDefaultHandlersExceptStatus(cluster);
    cluster.addVipHandler();
}
/** Adds the root, metric-state, application-status and statistics handlers (no vip/status handler). */
private static void addDefaultHandlersExceptStatus(ContainerCluster cluster) {
    cluster.addDefaultRootHandler();
    cluster.addMetricStateHandler();
    cluster.addApplicationStatusHandler();
    cluster.addStatisticsHandler();
}
/** Builds and adds a client provider for each {@code <client>} element. */
private void addClientProviders(DeployState deployState, Element spec, ContainerCluster cluster) {
    for (Element clientSpec: XML.getChildren(spec, "client")) {
        cluster.addComponent(new DomClientProviderBuilder().build(deployState, cluster, clientSpec));
    }
}

/** Adds {@code <server>} elements as configured components. */
private void addServerProviders(DeployState deployState, Element spec, ContainerCluster cluster) {
    addConfiguredComponents(deployState, cluster, spec, "server");
}

/** Adds top-level {@code <filter>} elements (legacy style) as components. */
private void addLegacyFilters(DeployState deployState, Element spec, ContainerCluster cluster) {
    for (Component component : buildLegacyFilters(deployState, spec, cluster)) {
        cluster.addComponent(component);
    }
}
/** Builds a Component for each {@code <filter>} child of the given spec. */
private List<Component> buildLegacyFilters(DeployState deployState, AbstractConfigProducer ancestor, Element spec) {
    List<Component> components = new ArrayList<>();
    for (Element filterElement : XML.getChildren(spec, "filter"))
        components.add(new DomFilterBuilder().build(deployState, ancestor, filterElement));
    return components;
}
/**
 * Adds configured {@code <accesslog>} components; when none are configured and the
 * cluster has search, the default search access log is used instead.
 */
private void addAccessLogs(DeployState deployState, ContainerCluster cluster, Element spec) {
    List<Element> accessLogElements = getAccessLogElements(spec);
    for (Element accessLog : accessLogElements) {
        // buildIfNotDisabled returns empty for explicitly disabled access logs
        AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
    }
    if (accessLogElements.isEmpty() && cluster.getSearch() != null)
        cluster.addDefaultSearchAccessLog();
}

/** Returns all {@code <accesslog>} children of spec. */
private List<Element> getAccessLogElements(Element spec) {
    return XML.getChildren(spec, "accesslog");
}
/** Applies an optional {@code <http>} element to the cluster. */
private void addHttp(DeployState deployState, Element spec, ContainerCluster cluster) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement != null) {
        cluster.setHttp(buildHttp(deployState, cluster, httpElement));
    }
}

/** Builds the http model; all servers are removed when networking is disabled. */
private Http buildHttp(DeployState deployState, ContainerCluster cluster, Element httpElement) {
    Http http = new HttpBuilder().build(deployState, cluster, httpElement);

    if (networking == Networking.disable)
        http.removeAllServers();

    return http;
}
/** Builds and adds a rest api for each {@code <rest-api>} element. */
private void addRestApis(DeployState deployState, Element spec, ContainerCluster cluster) {
    for (Element restApiElem : XML.getChildren(spec, "rest-api")) {
        cluster.addRestApi(
                new RestApiBuilder().build(deployState, cluster, restApiElem));
    }
}

/** Builds and adds a servlet for each {@code <servlet>} element. */
private void addServlets(DeployState deployState, Element spec, ContainerCluster cluster) {
    for (Element servletElem : XML.getChildren(spec, "servlet"))
        cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem));
}
/** Enables the document api on the cluster when a {@code <document-api>} element is present. */
private void addDocumentApi(Element spec, ContainerCluster cluster) {
    ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec);
    if (containerDocumentApi == null) return;

    cluster.setDocumentApi(containerDocumentApi);
}

/** Enables docproc on the cluster and propagates its message-bus tuning parameters. */
private void addDocproc(DeployState deployState, Element spec, ContainerCluster cluster) {
    ContainerDocproc containerDocproc = buildDocproc(deployState, spec, cluster);
    if (containerDocproc == null) return;
    cluster.setDocproc(containerDocproc);

    ContainerDocproc.Options docprocOptions = containerDocproc.options;
    cluster.setMbusParams(new ContainerCluster.MbusParams(
            docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/** Enables search on the cluster when a {@code <search>} element is present. */
private void addSearch(DeployState deployState, Element spec, ContainerCluster cluster) {
    Element searchElement = XML.getChild(spec, "search");
    if (searchElement == null) return;

    addIncludes(searchElement); // expand <include> before building chains
    cluster.setSearch(buildSearch(deployState, cluster, searchElement));

    addSearchHandler(cluster, searchElement);
    addGUIHandler(cluster);
    // Renderer ids reserved for the built-in renderers are rejected
    validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Enables stateless model evaluation when a {@code <model-evaluation>} element is present. */
private void addModelEvaluation(Element spec, ContainerCluster cluster, ConfigModelContext context) {
    if (XML.getChild(spec, "model-evaluation") == null) return;

    // Fall back to the empty profile list when no Vespa model is available
    RankProfileList profiles = (context.vespaModel() == null)
            ? RankProfileList.empty
            : context.vespaModel().rankProfileList();
    cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
/** Enables processing chains on the cluster when a {@code <processing>} element is present. */
private void addProcessing(DeployState deployState, Element spec, ContainerCluster cluster) {
    Element processingElement = XML.getChild(spec, "processing");
    if (processingElement == null) return;

    addIncludes(processingElement); // expand <include> before building chains
    cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
                                serverBindings(processingElement, ProcessingChains.defaultBindings));
    validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the search model: chains plus query profiles, semantic rules and page templates. */
private ContainerSearch buildSearch(DeployState deployState, ContainerCluster containerCluster, Element producerSpec) {
    SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);

    ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());

    applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
    containerSearch.setQueryProfiles(deployState.getQueryProfiles());
    containerSearch.setSemanticRules(deployState.getSemanticRules());

    return containerSearch;
}

/** Validates and installs the application package's page templates on the search model. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
    PageTemplates.validate(applicationPackage);
    containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Builds and registers a handler component for each {@code <handler>} element. */
private void addHandlers(DeployState deployState, ContainerCluster cluster, Element spec) {
    for (Element handlerElement : XML.getChildren(spec, "handler"))
        cluster.addComponent(new DomHandlerBuilder().build(deployState, cluster, handlerElement));
}
/**
 * Fails fast unless the spec's "version" attribute denotes version 1.
 * NOTE(review): throws a bare RuntimeException — consider IllegalArgumentException,
 * but callers may depend on the current type.
 */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");

    if ( ! Version.fromString(version).equals(new Version(1))) {
        throw new RuntimeException("Expected container version to be 1.0, but got " + version);
    }
}
/** Adds containers: a single standalone node for standalone builders, else nodes from the XML spec. */
private void addNodes(ContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder)
        addStandaloneNode(cluster);
    else
        addNodesFromXml(cluster, spec, context);
}

/** Adds the single implicit container used when building standalone. */
private void addStandaloneNode(ContainerCluster cluster) {
    Container container = new Container(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa());
    cluster.addContainers(Collections.singleton(container));
}
/**
 * Returns whether the given JVM arguments explicitly select a GC algorithm
 * (-XX:+UseXxxGC / -XX:-UseXxxGC) or contain any CMS tuning flag.
 */
static boolean incompatibleGCOptions(String jvmargs) {
    return Pattern.compile("-XX:[-+]Use.+GC").matcher(jvmargs).find()
            || Pattern.compile("-XX:[-+]*CMS").matcher(jvmargs).find();
}
/**
 * Resolves the JVM GC options for a cluster. (Also fixes the misspelled
 * parameter name {@code jvmGCOPtions}; internal rename, no caller impact.)
 *
 * @param requestedGCOptions explicitly requested options, or null to choose a default
 * @return the requested options when given; otherwise G1 in dev systems, G1 on hosted
 *         Vespa outside prod (and in us-east-3), and CMS in all remaining cases
 */
private static String buildJvmGCOptions(Zone zone, String requestedGCOptions, boolean isHostedVespa) {
    if (requestedGCOptions != null) {
        return requestedGCOptions;
    } else if (zone.system() == SystemName.dev) {
        return ContainerCluster.G1GC;
    } else if (isHostedVespa) {
        return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region()))
                ? ContainerCluster.G1GC : ContainerCluster.CMS;
    } else {
        return ContainerCluster.CMS;
    }
}
/**
 * Adds containers from the XML spec: a single allocated node when no {@code <nodes>}
 * element exists, otherwise nodes per the nodes element plus jvm/preload/memory/affinity tuning.
 */
private void addNodesFromXml(ContainerCluster cluster, Element containerElement, ConfigModelContext context) {
    Element nodesElement = XML.getChild(containerElement, "nodes");
    if (nodesElement == null) { // no nodes given: allocate a single implicit node
        Container node = new Container(cluster, "container.0", 0, cluster.isHostedVespa());
        HostResource host = allocateSingleNodeHost(cluster, log, containerElement, context);
        node.setHostResource(host);
        node.initService(context.getDeployLogger());
        cluster.addContainers(Collections.singleton(node));
    } else {
        List<Container> nodes = createNodes(cluster, nodesElement, context);
        applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));

        // Only fall back to attribute/default GC options when none are set on the cluster yet
        if ( !cluster.getJvmGCOptions().isPresent()) {
            String jvmGCOptions = nodesElement.hasAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
                    ? nodesElement.getAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
                    : null;
            cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
        }

        applyRoutingAliasProperties(nodes, cluster);
        applyDefaultPreload(nodes, nodesElement);
        applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
        if (useCpuSocketAffinity(nodesElement))
            AbstractService.distributeCpuSocketAffinity(nodes);

        cluster.addContainers(nodes);
    }
}
/** Dispatches on the nodes element's attributes to the matching node-creation strategy. */
private List<Container> createNodes(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("count"))
        return createNodesFromNodeCount(cluster, nodesElement, context);  // hosted node count
    if (nodesElement.hasAttribute("type"))
        return createNodesFromNodeType(cluster, nodesElement, context);   // hosted node type
    if (nodesElement.hasAttribute("of"))
        return createNodesFromContentServiceReference(cluster, nodesElement, context); // shared with content
    return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);   // explicit <node> list
}
/**
 * Propagates the cluster's service/endpoint aliases to each container as the
 * comma-separated "servicealiases"/"endpointaliases" node properties.
 * The join is computed once per alias set instead of once per container.
 */
private void applyRoutingAliasProperties(List<Container> result, ContainerCluster cluster) {
    if (!cluster.serviceAliases().isEmpty()) {
        String joinedServiceAliases = String.join(",", cluster.serviceAliases());
        result.forEach(container -> container.setProp("servicealiases", joinedServiceAliases));
    }
    if (!cluster.endpointAliases().isEmpty()) {
        String joinedEndpointAliases = String.join(",", cluster.endpointAliases());
        result.forEach(container -> container.setProp("endpointaliases", joinedEndpointAliases));
    }
}
/**
 * Applies an allocated-memory attribute of the form "NN%" to the cluster.
 * Null/empty input means "not set" and is ignored.
 *
 * @throws IllegalArgumentException if the value is not an integer followed by '%'
 */
private void applyMemoryPercentage(ContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    memoryPercentage = memoryPercentage.trim();

    if ( ! memoryPercentage.endsWith("%"))
        throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                           " must be an integer percentage ending by the '%' sign");
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();

    try {
        cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
    }
    catch (NumberFormatException e) {
        // Chain the parse failure as cause instead of silently dropping it
        throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                           " must be an integer percentage ending by the '%' sign", e);
    }
}
/**
 * Creates a single host when there is no nodes tag.
 * On hosted Vespa, reuses a host from the first content cluster when one exists,
 * otherwise provisions one non-required container node; outside hosted Vespa the
 * single-node container service spec host is used.
 */
private HostResource allocateSingleNodeHost(ContainerCluster cluster, DeployLogger logger, Element containerElement, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    HostSystem hostSystem = cluster.getHostSystem();
    if (deployState.isHosted()) {
        Optional<HostResource> singleContentHost = getHostResourceFromContentClusters(cluster, containerElement, context);
        if (singleContentHost.isPresent()) { // there is a content cluster; put the container on its first node
            return singleContentHost.get();
        }
        else { // request 1 node
            ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
                                                          ClusterSpec.Id.from(cluster.getName()),
                                                          deployState.getWantedNodeVespaVersion(),
                                                          false);
            Capacity capacity = Capacity.fromNodeCount(1,
                                                       Optional.empty(),
                                                       false,
                                                       ! deployState.getProperties().isBootstrap());
            return hostSystem.allocateHosts(clusterSpec, capacity, 1, logger).keySet().iterator().next();
        }
    } else {
        return hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC);
    }
}
/** Provisions hosts from a {@code count} attribute via NodesSpecification and builds containers on them. */
private List<Container> createNodesFromNodeCount(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().getHostSystem(),
                                                                              ClusterSpec.Type.container,
                                                                              ClusterSpec.Id.from(cluster.getName()),
                                                                              log);
    return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}

/** Allocates hosts of a required node {@code type} attribute and builds containers on them. */
private List<Container> createNodesFromNodeType(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType type = NodeType.valueOf(nodesElement.getAttribute("type")); // throws on unknown type names
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
                                                  ClusterSpec.Id.from(cluster.getName()),
                                                  context.getDeployState().getWantedNodeVespaVersion(),
                                                  false);
    Map<HostResource, ClusterMembership> hosts =
            cluster.getRoot().getHostSystem().allocateHosts(clusterSpec,
                                                            Capacity.fromRequiredNodeType(type), 1, log);
    return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
 * Places containers on the nodes of the content cluster referenced by the
 * {@code of} attribute.
 *
 * @throws IllegalArgumentException if the reference cannot be resolved, is not a
 *         content service, or the referenced service has no nodes element
 */
private List<Container> createNodesFromContentServiceReference(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    // Resolve the referenced content cluster and fail with a specific message on each mismatch
    String referenceId = nodesElement.getAttribute("of");
    Element services = servicesRootOf(nodesElement).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
    Element referencedService = findChildById(services, referenceId).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
    if ( ! referencedService.getTagName().equals("content"))
        throw new IllegalArgumentException(cluster + " references service '" + referenceId + "', " +
                                           "but that is not a content service");
    Element referencedNodesElement = XML.getChild(referencedService, "nodes");
    if (referencedNodesElement == null)
        throw new IllegalArgumentException(cluster + " references service '" + referenceId + "' to supply nodes, " +
                                           "but that service has no <nodes> element");

    cluster.setHostClusterId(referenceId);

    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(NodesSpecification.from(new ModelElement(referencedNodesElement), context),
                                        referenceId,
                                        cluster.getRoot().getHostSystem(),
                                        context.getDeployLogger());
    return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
 * This is used in case we are on hosted Vespa and no nodes tag is supplied:
 * If there are content clusters this will pick the first host in the first cluster as the container node.
 * If there are no content clusters this will return empty (such that the node can be created by the container here).
 */
private Optional<HostResource> getHostResourceFromContentClusters(ContainerCluster cluster, Element containersElement, ConfigModelContext context) {
    Optional<Element> services = servicesRootOf(containersElement);
    if ( ! services.isPresent())
        return Optional.empty();
    List<Element> contentServices = XML.getChildren(services.get(), "content");
    if ( contentServices.isEmpty() ) return Optional.empty();

    Element contentNodesElementOrNull = XML.getChild(contentServices.get(0), "nodes");

    // No explicit content nodes element: assume one non-dedicated node
    NodesSpecification nodesSpec;
    if (contentNodesElementOrNull == null)
        nodesSpec = NodesSpecification.nonDedicated(1, context);
    else
        nodesSpec = NodesSpecification.from(new ModelElement(contentNodesElementOrNull), context);

    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(nodesSpec,
                                        contentServices.get(0).getAttribute("id"),
                                        cluster.getRoot().getHostSystem(),
                                        context.getDeployLogger());
    return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the services element above the given Element, or empty if there is no services element */
private Optional<Element> servicesRootOf(Element element) {
    // Iterative walk up the ancestor chain; stops at the first non-Element parent.
    Node current = element.getParentNode();
    while (current instanceof Element) {
        Element candidate = (Element) current;
        if ("services".equals(candidate.getTagName())) return Optional.of(candidate);
        current = candidate.getParentNode();
    }
    return Optional.empty();
}
/** Builds one Container per allocated host, named "container.&lt;index&gt;" from its cluster membership. */
private List<Container> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ContainerCluster cluster) {
    List<Container> nodes = new ArrayList<>();
    for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
        String id = "container." + entry.getValue().index();
        // Retired flag and index come from the cluster membership of the allocation
        Container container = new Container(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa());
        container.setHostResource(entry.getKey());
        container.initService(deployLogger);
        nodes.add(container);
    }
    return nodes;
}
/** Builds one Container per explicit {@code <node>} child element, indexed in document order. */
private List<Container> createNodesFromNodeList(DeployState deployState, ContainerCluster cluster, Element nodesElement) {
    List<Container> nodes = new ArrayList<>();
    int nodeIndex = 0;
    for (Element nodeElem: XML.getChildren(nodesElement, "node")) {
        nodes.add(new ContainerServiceBuilder("container." + nodeIndex, nodeIndex).build(deployState, cluster, nodeElem));
        nodeIndex++;
    }
    return nodes;
}

/** Builds (does not throw) the error used when a "nodes of" reference does not resolve. */
private IllegalArgumentException clusterReferenceNotFoundException(ContainerCluster cluster, String referenceId) {
    return new IllegalArgumentException(cluster + " references service '" + referenceId +
                                        "' but this service is not defined");
}

/** Returns the first direct child of parent whose "id" attribute equals the given id, if any. */
private Optional<Element> findChildById(Element parent, String id) {
    for (Element child : XML.getChildren(parent))
        if (id.equals(child.getAttribute("id"))) return Optional.of(child);
    return Optional.empty();
}
/** Returns whether cpu socket affinity is requested on the nodes element; absent attribute means false. */
private boolean useCpuSocketAffinity(Element nodesElement) {
    if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME))
        return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
    else
        return false;
}

/** Prepends the cluster-level jvm options to each container that has none of its own. */
private void applyNodesTagJvmArgs(List<Container> containers, String jvmArgs) {
    for (Container container: containers) {
        // Per-node options win: only containers without assigned options get the nodes-tag args
        if (container.getAssignedJvmOptions().isEmpty())
            container.prependJvmOptions(jvmArgs);
    }
}

/** Applies the nodes-tag preload attribute to every container; no-op when the attribute is absent. */
private void applyDefaultPreload(List<Container> containers, Element nodesElement) {
    if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    for (Container container: containers)
        container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME));
}
/** Registers the SearchHandler with either the configured or the default server bindings. */
private void addSearchHandler(ContainerCluster cluster, Element searchElement) {
    ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(
            cluster.getSearch().getChains(), "com.yahoo.search.handler.SearchHandler");
    // NOTE(review): the default binding literal below is truncated in this copy of the file
    // ("http: with no closing quote) — restore the full binding URI from version control.
    String[] defaultBindings = {"http:
    for (String binding: serverBindings(searchElement, defaultBindings)) {
        searchHandler.addServerBindings(binding);
    }
    cluster.addComponent(searchHandler);
}

/** Registers the GUI handler component on the cluster. */
private void addGUIHandler(ContainerCluster cluster) {
    Handler<?> guiHandler = new GUIHandler();
    // NOTE(review): binding literal truncated here as well ("http: …) — restore from version control.
    guiHandler.addServerBindings("http:
    cluster.addComponent(guiHandler);
}
/** Returns the configured {@code <binding>} values, or the given defaults when none are configured. */
private String[] serverBindings(Element searchElement, String... defaultBindings) {
    List<Element> bindings = XML.getChildren(searchElement, "binding");
    if (bindings.isEmpty())
        return defaultBindings;

    return toBindingList(bindings);
}

/** Extracts the trimmed, non-empty text content of each binding element, in document order. */
private String[] toBindingList(List<Element> bindingElements) {
    List<String> result = new ArrayList<>();

    for (Element element: bindingElements) {
        String text = element.getTextContent().trim();
        if (!text.isEmpty())
            result.add(text);
    }

    return result.toArray(new String[result.size()]);
}
/** Builds the document-api model from an optional {@code <document-api>} element; null when absent. */
private ContainerDocumentApi buildDocumentApi(ContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;

    ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
    return new ContainerDocumentApi(cluster, documentApiOptions);
}

/** Builds the docproc model from an optional {@code <document-processing>} element; null when absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null)
        return null;

    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
    // standalone builders do not get message-bus based docproc wiring (last argument)
    return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
/**
 * Expands all {@code <include dir="...">} directives directly under the given element.
 *
 * @throws IllegalArgumentException if includes are present but no application package is set
 */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes == null || includes.isEmpty()) {
        return;
    }
    if (app == null) {
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    }
    for (Element include : includes) {
        addInclude(parentElement, include);
    }
}

/** Imports every child element of every XML file in the include directory into parentElement. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);

    List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
    for (Element includedFile : includedFiles) {
        List<Element> includedSubElements = XML.getChildren(includedFile);
        for (Element includedSubElement : includedSubElements) {
            // importNode(…, true) deep-copies into parentElement's document before appending
            Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
            parentElement.appendChild(copiedNode);
        }
    }
}
private static void addConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec, String componentName) {
for (Element node : XML.getChildren(spec, componentName)) {
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
}
}
private static void validateAndAddConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec, String componentName, Consumer<Element> elementValidator) {
for (Element node : XML.getChildren(spec, componentName)) {
elementValidator.accept(node);
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
}
}
private void addIdentityProvider(ContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain().ifPresent(domain -> {
AthenzService service = spec.athenzService(zone.environment(), zone.region())
.orElseThrow(() -> new RuntimeException("Missing Athenz service configuration"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
cluster.addComponent(identityProvider);
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
return Optional.ofNullable(loadbalancerName)
.orElseGet(
() -> HostName.from(configServerSpecs.stream()
.findFirst()
.map(ConfigServerSpec::getHostName)
.orElse("unknown")
));
}
/**
* Disallow renderers named "DefaultRenderer" or "JsonRenderer"
*/
private static void validateRendererElement(Element element) {
String idAttr = element.getAttribute("id");
if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) {
throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
}
}
} |
Fixed | private String getJvmOptions(ContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
String jvmOptions = "";
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
deployLogger.log(Level.WARNING, "You have specified both jvm-options='" + jvmOptions + "'" +
" and deprecated jvmargs='" + jvmArgs + "'. 'jvmargs' will be ignored");
}
} else {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
if (incompatibleGCOptions(jvmOptions)) {
deployLogger.log(Level.WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
cluster.setJvmGCOptions(ContainerCluster.CMS);
}
}
return jvmOptions;
} | deployLogger.log(Level.WARNING, "You have specified both jvm-options='" + jvmOptions + "'" + | private String getJvmOptions(ContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
String jvmOptions = "";
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
" and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
}
} else {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
if (incompatibleGCOptions(jvmOptions)) {
deployLogger.log(Level.WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
cluster.setJvmGCOptions(ContainerCluster.CMS);
}
}
return jvmOptions;
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
/**
* Default path to vip status file for container in Hosted Vespa.
*/
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/mediasearch/oor/status.html");
/**
* Path to vip status file for container in Hosted Vespa. Only used if set, else use HOSTED_VESPA_STATUS_FILE
*/
private static final String HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING = "cloudconfig_server__tenant_vip_status_file";
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName("container"), ConfigModelId.fromName("jdisc"));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
app = modelContext.getApplicationPackage();
checkVersion(spec);
this.log = modelContext.getDeployLogger();
ContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
addBundlesForPlatformComponents(cluster);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
private void addBundlesForPlatformComponents(ContainerCluster cluster) {
for (Component<?, ?> component : cluster.getAllComponents()) {
String componentClass = component.model.bundleInstantiationSpec.getClassName();
BundleMapper.getBundlePath(componentClass).
ifPresent(cluster::addPlatformBundle);
}
}
private ContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ContainerCluster>() {
@Override
protected ContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
return new ContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
private void addClusterContent(ContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec);
addHandlers(deployState, cluster, spec);
addRestApis(deployState, spec, cluster);
addServlets(deployState, spec, cluster);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addModelEvaluation(spec, cluster, context);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
addDefaultHandlers(cluster);
addStatusHandlers(cluster, context);
setDefaultMetricConsumerFactory(cluster);
addHttp(deployState, spec, cluster);
addAccessLogs(deployState, cluster, spec);
addRoutingAliases(cluster, spec, deployState.zone().environment());
addNodes(cluster, spec, context);
addClientProviders(deployState, spec, cluster);
addServerProviders(deployState, spec, cluster);
addLegacyFilters(deployState, spec, cluster);
addAthensCopperArgos(cluster, context);
}
private void addSecretStore(ContainerCluster cluster, Element spec) {
Element secretStoreElement = XML.getChild(spec, "secret-store");
if (secretStoreElement != null) {
SecretStore secretStore = new SecretStore();
for (Element group : XML.getChildren(secretStoreElement, "group")) {
secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
}
cluster.setSecretStore(secretStore);
}
}
private void addAthensCopperArgos(ContainerCluster cluster, ConfigModelContext context) {
app.getDeployment().map(DeploymentSpec::fromXml)
.ifPresent(deploymentSpec -> {
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), deploymentSpec);
});
}
private void addRotationProperties(ContainerCluster cluster, Zone zone, Set<Rotation> rotations, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
setRotations(container, rotations, spec.globalServiceId(), cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
return spec.zones().stream()
.anyMatch(declaredZone -> declaredZone.deploysTo(zone.environment(), Optional.of(zone.region())) &&
declaredZone.active());
}
private void setRotations(Container container, Set<Rotation> rotations, Optional<String> globalServiceId, String containerClusterName) {
if ( ! rotations.isEmpty() && globalServiceId.isPresent()) {
if (containerClusterName.equals(globalServiceId.get())) {
container.setProp("rotations", rotations.stream().map(Rotation::getId).collect(Collectors.joining(",")));
}
}
}
private void addRoutingAliases(ContainerCluster cluster, Element spec, Environment environment) {
if (environment != Environment.prod) return;
Element aliases = XML.getChild(spec, "aliases");
for (Element alias : XML.getChildren(aliases, "service-alias")) {
cluster.serviceAliases().add(XML.getValue(alias));
}
for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
cluster.endpointAliases().add(XML.getValue(alias));
}
}
private void addConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec) {
for (Element components : XML.getChildren(spec, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, spec, "component");
}
private void setDefaultMetricConsumerFactory(ContainerCluster cluster) {
cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.STATE_MONITOR);
}
private void addDefaultHandlers(ContainerCluster cluster) {
addDefaultHandlersExceptStatus(cluster);
}
private void addStatusHandlers(ContainerCluster cluster, ConfigModelContext configModelContext) {
if (configModelContext.getDeployState().isHosted()) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
"http:
} else {
cluster.addVipHandler();
}
}
/**
* Intended for use by legacy builders only.
* Will be called during building when using ContainerModelBuilder.
*/
public static void addDefaultHandler_legacyBuilder(ContainerCluster cluster) {
addDefaultHandlersExceptStatus(cluster);
cluster.addVipHandler();
}
private static void addDefaultHandlersExceptStatus(ContainerCluster cluster) {
cluster.addDefaultRootHandler();
cluster.addMetricStateHandler();
cluster.addApplicationStatusHandler();
cluster.addStatisticsHandler();
}
private void addClientProviders(DeployState deployState, Element spec, ContainerCluster cluster) {
for (Element clientSpec: XML.getChildren(spec, "client")) {
cluster.addComponent(new DomClientProviderBuilder().build(deployState, cluster, clientSpec));
}
}
private void addServerProviders(DeployState deployState, Element spec, ContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
private void addLegacyFilters(DeployState deployState, Element spec, ContainerCluster cluster) {
for (Component component : buildLegacyFilters(deployState, cluster, spec)) {
cluster.addComponent(component);
}
}
private List<Component> buildLegacyFilters(DeployState deployState, AbstractConfigProducer ancestor, Element spec) {
List<Component> components = new ArrayList<>();
for (Element node : XML.getChildren(spec, "filter")) {
components.add(new DomFilterBuilder().build(deployState, ancestor, node));
}
return components;
}
private void addAccessLogs(DeployState deployState, ContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
if (accessLogElements.isEmpty() && cluster.getSearch() != null)
cluster.addDefaultSearchAccessLog();
}
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
private void addHttp(DeployState deployState, Element spec, ContainerCluster cluster) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
}
private Http buildHttp(DeployState deployState, ContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
private void addRestApis(DeployState deployState, Element spec, ContainerCluster cluster) {
for (Element restApiElem : XML.getChildren(spec, "rest-api")) {
cluster.addRestApi(
new RestApiBuilder().build(deployState, cluster, restApiElem));
}
}
private void addServlets(DeployState deployState, Element spec, ContainerCluster cluster) {
for (Element servletElem : XML.getChildren(spec, "servlet"))
cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem));
}
private void addDocumentApi(Element spec, ContainerCluster cluster) {
ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec);
if (containerDocumentApi == null) return;
cluster.setDocumentApi(containerDocumentApi);
}
private void addDocproc(DeployState deployState, Element spec, ContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
private void addSearch(DeployState deployState, Element spec, ContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
private void addModelEvaluation(Element spec, ContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
private void addProcessing(DeployState deployState, Element spec, ContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
private ContainerSearch buildSearch(DeployState deployState, ContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
private void addHandlers(DeployState deployState, ContainerCluster cluster, Element spec) {
for (Element component: XML.getChildren(spec, "handler")) {
cluster.addComponent(
new DomHandlerBuilder().build(deployState, cluster, component));
}
}
private void checkVersion(Element spec) {
String version = spec.getAttribute("version");
if ( ! Version.fromString(version).equals(new Version(1))) {
throw new RuntimeException("Expected container version to be 1.0, but got " + version);
}
}
private void addNodes(ContainerCluster cluster, Element spec, ConfigModelContext context) {
if (standaloneBuilder)
addStandaloneNode(cluster);
else
addNodesFromXml(cluster, spec, context);
}
private void addStandaloneNode(ContainerCluster cluster) {
Container container = new Container(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa());
cluster.addContainers(Collections.singleton(container));
}
static boolean incompatibleGCOptions(String jvmargs) {
Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC");
Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS");
return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find());
}
private static String buildJvmGCOptions(Zone zone, String jvmGCOPtions, boolean isHostedVespa) {
if (jvmGCOPtions != null) {
return jvmGCOPtions;
} else if (zone.system() == SystemName.dev) {
return ContainerCluster.G1GC;
} else if (isHostedVespa) {
return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region()))
? ContainerCluster.G1GC : ContainerCluster.CMS;
} else {
return ContainerCluster.CMS;
}
}
private void addNodesFromXml(ContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
Container node = new Container(cluster, "container.0", 0, cluster.isHostedVespa());
HostResource host = allocateSingleNodeHost(cluster, log, containerElement, context);
node.setHostResource(host);
node.initService(context.getDeployLogger());
cluster.addContainers(Collections.singleton(node));
} else {
List<Container> nodes = createNodes(cluster, nodesElement, context);
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
if ( !cluster.getJvmGCOptions().isPresent()) {
String jvmGCOptions = nodesElement.hasAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
? nodesElement.getAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
: null;
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
}
applyRoutingAliasProperties(nodes, cluster);
applyDefaultPreload(nodes, nodesElement);
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
private List<Container> createNodes(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of"))
return createNodesFromContentServiceReference(cluster, nodesElement, context);
else
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
private void applyRoutingAliasProperties(List<Container> result, ContainerCluster cluster) {
if (!cluster.serviceAliases().isEmpty()) {
result.forEach(container -> {
container.setProp("servicealiases", cluster.serviceAliases().stream().collect(Collectors.joining(",")));
});
}
if (!cluster.endpointAliases().isEmpty()) {
result.forEach(container -> {
container.setProp("endpointaliases", cluster.endpointAliases().stream().collect(Collectors.joining(",")));
});
}
}
private void applyMemoryPercentage(ContainerCluster cluster, String memoryPercentage) {
if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
memoryPercentage = memoryPercentage.trim();
if ( ! memoryPercentage.endsWith("%"))
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign");
memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
try {
cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign");
}
}
/** Creates a single host when there is no nodes tag */
private HostResource allocateSingleNodeHost(ContainerCluster cluster, DeployLogger logger, Element containerElement, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.getHostSystem();
if (deployState.isHosted()) {
Optional<HostResource> singleContentHost = getHostResourceFromContentClusters(cluster, containerElement, context);
if (singleContentHost.isPresent()) {
return singleContentHost.get();
}
else {
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
deployState.getWantedNodeVespaVersion(),
false);
Capacity capacity = Capacity.fromNodeCount(1,
Optional.empty(),
false,
! deployState.getProperties().isBootstrap());
return hostSystem.allocateHosts(clusterSpec, capacity, 1, logger).keySet().iterator().next();
}
} else {
return hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC);
}
}
private List<Container> createNodesFromNodeCount(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().getHostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
private List<Container> createNodesFromNodeType(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
context.getDeployState().getWantedNodeVespaVersion(),
false);
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().getHostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), 1, log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
private List<Container> createNodesFromContentServiceReference(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
String referenceId = nodesElement.getAttribute("of");
Element services = servicesRootOf(nodesElement).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
Element referencedService = findChildById(services, referenceId).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
if ( ! referencedService.getTagName().equals("content"))
throw new IllegalArgumentException(cluster + " references service '" + referenceId + "', " +
"but that is not a content service");
Element referencedNodesElement = XML.getChild(referencedService, "nodes");
if (referencedNodesElement == null)
throw new IllegalArgumentException(cluster + " references service '" + referenceId + "' to supply nodes, " +
"but that service has no <nodes> element");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(NodesSpecification.from(new ModelElement(referencedNodesElement), context),
referenceId,
cluster.getRoot().getHostSystem(),
context.getDeployLogger());
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
* This is used in case we are on hosted Vespa and no nodes tag is supplied:
* If there are content clusters this will pick the first host in the first cluster as the container node.
* If there are no content clusters this will return empty (such that the node can be created by the container here).
*/
private Optional<HostResource> getHostResourceFromContentClusters(ContainerCluster cluster, Element containersElement, ConfigModelContext context) {
Optional<Element> services = servicesRootOf(containersElement);
if ( ! services.isPresent())
return Optional.empty();
List<Element> contentServices = XML.getChildren(services.get(), "content");
if ( contentServices.isEmpty() ) return Optional.empty();
Element contentNodesElementOrNull = XML.getChild(contentServices.get(0), "nodes");
NodesSpecification nodesSpec;
if (contentNodesElementOrNull == null)
nodesSpec = NodesSpecification.nonDedicated(1, context);
else
nodesSpec = NodesSpecification.from(new ModelElement(contentNodesElementOrNull), context);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodesSpec,
contentServices.get(0).getAttribute("id"),
cluster.getRoot().getHostSystem(),
context.getDeployLogger());
return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the services element above the given Element, or empty if there is no services element */
private Optional<Element> servicesRootOf(Element element) {
Node parent = element.getParentNode();
if (parent == null) return Optional.empty();
if ( ! (parent instanceof Element)) return Optional.empty();
Element parentElement = (Element)parent;
if (parentElement.getTagName().equals("services")) return Optional.of(parentElement);
return servicesRootOf(parentElement);
}
private List<Container> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ContainerCluster cluster) {
List<Container> nodes = new ArrayList<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
String id = "container." + entry.getValue().index();
Container container = new Container(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa());
container.setHostResource(entry.getKey());
container.initService(deployLogger);
nodes.add(container);
}
return nodes;
}
private List<Container> createNodesFromNodeList(DeployState deployState, ContainerCluster cluster, Element nodesElement) {
List<Container> nodes = new ArrayList<>();
int nodeIndex = 0;
for (Element nodeElem: XML.getChildren(nodesElement, "node")) {
nodes.add(new ContainerServiceBuilder("container." + nodeIndex, nodeIndex).build(deployState, cluster, nodeElem));
nodeIndex++;
}
return nodes;
}
private IllegalArgumentException clusterReferenceNotFoundException(ContainerCluster cluster, String referenceId) {
return new IllegalArgumentException(cluster + " references service '" + referenceId +
"' but this service is not defined");
}
private Optional<Element> findChildById(Element parent, String id) {
for (Element child : XML.getChildren(parent))
if (id.equals(child.getAttribute("id"))) return Optional.of(child);
return Optional.empty();
}
private boolean useCpuSocketAffinity(Element nodesElement) {
if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME))
return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
else
return false;
}
private void applyNodesTagJvmArgs(List<Container> containers, String jvmArgs) {
for (Container container: containers) {
if (container.getAssignedJvmOptions().isEmpty())
container.prependJvmOptions(jvmArgs);
}
}
private void applyDefaultPreload(List<Container> containers, Element nodesElement) {
if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
for (Container container: containers)
container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME));
}
private void addSearchHandler(ContainerCluster cluster, Element searchElement) {
ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(
cluster.getSearch().getChains(), "com.yahoo.search.handler.SearchHandler");
String[] defaultBindings = {"http:
for (String binding: serverBindings(searchElement, defaultBindings)) {
searchHandler.addServerBindings(binding);
}
cluster.addComponent(searchHandler);
}
private void addGUIHandler(ContainerCluster cluster) {
Handler<?> guiHandler = new GUIHandler();
guiHandler.addServerBindings("http:
cluster.addComponent(guiHandler);
}
private String[] serverBindings(Element searchElement, String... defaultBindings) {
List<Element> bindings = XML.getChildren(searchElement, "binding");
if (bindings.isEmpty())
return defaultBindings;
return toBindingList(bindings);
}
private String[] toBindingList(List<Element> bindingElements) {
List<String> result = new ArrayList<>();
for (Element element: bindingElements) {
String text = element.getTextContent().trim();
if (!text.isEmpty())
result.add(text);
}
return result.toArray(new String[result.size()]);
}
private ContainerDocumentApi buildDocumentApi(ContainerCluster cluster, Element spec) {
Element documentApiElement = XML.getChild(spec, "document-api");
if (documentApiElement == null) return null;
ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
return new ContainerDocumentApi(cluster, documentApiOptions);
}
private ContainerDocproc buildDocproc(DeployState deployState, ContainerCluster cluster, Element spec) {
Element docprocElement = XML.getChild(spec, "document-processing");
if (docprocElement == null)
return null;
addIncludes(docprocElement);
DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
private void addIncludes(Element parentElement) {
List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
if (includes == null || includes.isEmpty()) {
return;
}
if (app == null) {
throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
}
for (Element include : includes) {
addInclude(parentElement, include);
}
}
/** Copies every element found under the included directory into the parent element. */
private void addInclude(Element parentElement, Element include) {
    String dir = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dir);
    for (Element file : Xml.allElemsFromPath(app, dir)) {
        for (Element child : XML.getChildren(file)) {
            // importNode(..., true) deep-copies into the target document before appending
            parentElement.appendChild(parentElement.getOwnerDocument().importNode(child, true));
        }
    }
}
/** Adds one configured component to the cluster for each matching child element of spec. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec, String componentName) {
    for (Element componentElement : XML.getChildren(spec, componentName))
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
}
/** Validates each matching child element with the given validator before adding it as a component. */
private static void validateAndAddConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec, String componentName, Consumer<Element> elementValidator) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        elementValidator.accept(componentElement); // throws on an invalid element before it is added
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/**
 * Adds an Athenz identity provider component when the deployment spec declares an Athenz domain.
 * Also propagates the identity domain and service to each container as properties.
 * Throws if a domain is declared but no Athenz service is configured for this zone.
 */
private void addIdentityProvider(ContainerCluster cluster,
                                 List<ConfigServerSpec> configServerSpecs,
                                 HostName loadBalancerName,
                                 URI ztsUrl,
                                 String athenzDnsSuffix,
                                 Zone zone,
                                 DeploymentSpec spec) {
    spec.athenzDomain().ifPresent(domain -> {
        AthenzService service = spec.athenzService(zone.environment(), zone.region())
                .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration"));
        // e.g. "<env>-<region>.<athenzDnsSuffix>"
        String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
        IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
        cluster.addComponent(identityProvider);
        cluster.getContainers().forEach(container -> {
            container.setProp("identity.domain", domain.value());
            container.setProp("identity.service", service.value());
        });
    });
}
/**
 * Returns the given load balancer name when set, otherwise falls back to the
 * host name of the first config server, or "unknown" when none are configured.
 */
private HostName getLoadBalancerName(HostName loadBalancerName, List<ConfigServerSpec> configServerSpecs) {
    // Parameter renamed from 'loadbalancerName' for camelCase consistency with the rest of the file.
    return Optional.ofNullable(loadBalancerName)
                   .orElseGet(() -> HostName.from(configServerSpecs.stream()
                                                          .findFirst()
                                                          .map(ConfigServerSpec::getHostName)
                                                          .orElse("unknown")));
}
/**
 * Rejects renderer elements whose id collides with the ids reserved for the
 * built-in default (XML) and JSON renderers.
 */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    if (id.equals(xmlRendererId) || id.equals(jsonRendererId))
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
/**
 * Default path to vip status file for container in Hosted Vespa.
 */
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/mediasearch/oor/status.html");
/**
 * Path to vip status file for container in Hosted Vespa. Only used if set, else use HOSTED_VESPA_STATUS_FILE
 */
private static final String HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING = "cloudconfig_server__tenant_vip_status_file";
// Whether the container opens network ports
public enum Networking { disable, enable }
// The application package being deployed; set in doBuild, may be null for some standalone uses
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
// XML element names this builder handles ("container" and its legacy alias "jdisc")
public static final List<ConfigModelId> configModelIds =
        ImmutableList.of(ConfigModelId.fromName("container"), ConfigModelId.fromName("jdisc"));
// Renderer ids reserved for the built-in renderers; user renderers must not reuse them
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * Creates a container model builder.
 *
 * @param standaloneBuilder whether this builds a standalone container (outside a full Vespa model)
 * @param networking whether network ports should be opened
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
    super(ContainerModel.class);
    this.standaloneBuilder = standaloneBuilder;
    this.networking = networking;
    // Standalone containers do not get an rpc server; http follows the networking setting
    this.rpcServerEnabled = !standaloneBuilder;
    this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the element ids this builder handles: "container" and "jdisc". */
@Override
public List<ConfigModelId> handlesElements() {
    return configModelIds;
}
/**
 * Builds the container model: validates the spec version, creates the cluster,
 * populates its content and bundles, and applies the server settings.
 */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
    app = modelContext.getApplicationPackage();
    checkVersion(spec); // fails fast on any version other than 1.0
    this.log = modelContext.getDeployLogger();
    ContainerCluster cluster = createContainerCluster(spec, modelContext);
    addClusterContent(cluster, spec, modelContext);
    addBundlesForPlatformComponents(cluster);
    cluster.setRpcServerEnabled(rpcServerEnabled);
    cluster.setHttpServerEnabled(httpServerEnabled);
    model.setCluster(cluster);
}
/** Installs the platform bundle of each component's implementation class, when one is mapped. */
private void addBundlesForPlatformComponents(ContainerCluster cluster) {
    for (Component<?, ?> component : cluster.getAllComponents()) {
        String className = component.model.bundleInstantiationSpec.getClassName();
        BundleMapper.getBundlePath(className).ifPresent(cluster::addPlatformBundle);
    }
}
/**
 * Creates the container cluster config producer for the given spec, using an
 * anonymous dom builder so the standard producer wiring is applied.
 */
private ContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
    return new VespaDomBuilder.DomConfigProducerBuilder<ContainerCluster>() {
        @Override
        protected ContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
            // The producer id is used both as subId and name
            return new ContainerCluster(ancestor, modelContext.getProducerId(),
                                        modelContext.getProducerId(), deployState);
        }
    }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the XML spec: components, handlers, apis, chains,
 * http/server config, nodes and hosted-Vespa identity wiring.
 * NOTE(review): the call order below appears significant (e.g. default handlers are
 * added after search, nodes after http) — preserve it when modifying.
 */
private void addClusterContent(ContainerCluster cluster, Element spec, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
    addConfiguredComponents(deployState, cluster, spec);
    addSecretStore(cluster, spec);
    addHandlers(deployState, cluster, spec);
    addRestApis(deployState, spec, cluster);
    addServlets(deployState, spec, cluster);
    addProcessing(deployState, spec, cluster);
    addSearch(deployState, spec, cluster);
    addModelEvaluation(spec, cluster, context);
    addDocproc(deployState, spec, cluster);
    addDocumentApi(spec, cluster);
    addDefaultHandlers(cluster);
    addStatusHandlers(cluster, context);
    setDefaultMetricConsumerFactory(cluster);
    addHttp(deployState, spec, cluster);
    addAccessLogs(deployState, cluster, spec);
    addRoutingAliases(cluster, spec, deployState.zone().environment());
    addNodes(cluster, spec, context);
    addClientProviders(deployState, spec, cluster);
    addServerProviders(deployState, spec, cluster);
    addLegacyFilters(deployState, spec, cluster);
    addAthensCopperArgos(cluster, context);
}
/** Configures the cluster's secret store from the secret-store element, when present. */
private void addSecretStore(ContainerCluster cluster, Element spec) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;
    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
/**
 * When a deployment spec is present in the application package, wires the Athenz
 * identity provider and rotation properties into the cluster.
 */
private void addAthensCopperArgos(ContainerCluster cluster, ConfigModelContext context) {
    app.getDeployment().map(DeploymentSpec::fromXml)
       .ifPresent(deploymentSpec -> {
           addIdentityProvider(cluster,
                               context.getDeployState().getProperties().configServerSpecs(),
                               context.getDeployState().getProperties().loadBalancerName(),
                               context.getDeployState().getProperties().ztsUrl(),
                               context.getDeployState().getProperties().athenzDnsSuffix(),
                               context.getDeployState().zone(),
                               deploymentSpec);
           addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), deploymentSpec);
       });
}
/** Sets rotation-related properties on every container of the cluster. */
private void addRotationProperties(ContainerCluster cluster, Zone zone, Set<Rotation> rotations, DeploymentSpec spec) {
    String activeRotation = Boolean.toString(zoneHasActiveRotation(zone, spec));
    cluster.getContainers().forEach(container -> {
        setRotations(container, rotations, spec.globalServiceId(), cluster.getName());
        container.setProp("activeRotation", activeRotation);
    });
}
/** Returns whether the deployment spec declares the given zone as an active deployment target. */
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
    return spec.zones().stream()
               .anyMatch(z -> z.deploysTo(zone.environment(), Optional.of(zone.region())) && z.active());
}
/** Sets the "rotations" property on the container when rotations exist and this cluster is the global service. */
private void setRotations(Container container, Set<Rotation> rotations, Optional<String> globalServiceId, String containerClusterName) {
    boolean isGlobalServiceCluster = globalServiceId.filter(containerClusterName::equals).isPresent();
    if ( ! rotations.isEmpty() && isGlobalServiceCluster) {
        String rotationIds = rotations.stream().map(Rotation::getId).collect(Collectors.joining(","));
        container.setProp("rotations", rotationIds);
    }
}
/**
 * Collects service and endpoint aliases from the aliases element; only applied in prod.
 * NOTE(review): when no <aliases> element exists, 'aliases' is null here — this relies on
 * XML.getChildren accepting a null parent and returning an empty list; confirm that contract.
 */
private void addRoutingAliases(ContainerCluster cluster, Element spec, Environment environment) {
    if (environment != Environment.prod) return;
    Element aliases = XML.getChild(spec, "aliases");
    for (Element alias : XML.getChildren(aliases, "service-alias")) {
        cluster.serviceAliases().add(XML.getValue(alias));
    }
    for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
        cluster.endpointAliases().add(XML.getValue(alias));
    }
}
/** Adds all component children of spec, including those nested inside components groups (with includes expanded). */
private void addConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec) {
    for (Element componentsElement : XML.getChildren(spec, "components")) {
        addIncludes(componentsElement);
        addConfiguredComponents(deployState, cluster, componentsElement, "component");
    }
    addConfiguredComponents(deployState, cluster, spec, "component");
}
/** Uses the state monitor as the cluster's default metric consumer. */
private void setDefaultMetricConsumerFactory(ContainerCluster cluster) {
    cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.STATE_MONITOR);
}
/** Adds the default handlers; status/vip handlers are added separately by addStatusHandlers. */
private void addDefaultHandlers(ContainerCluster cluster) {
    addDefaultHandlersExceptStatus(cluster);
}
/**
 * Adds the status handler: on hosted Vespa a file-backed status handler (path overridable
 * via the install setting), otherwise the standard vip handler.
 * NOTE(review): the binding string below is truncated in this source view ("http:…) —
 * confirm the complete FileStatusHandlerComponent call against the upstream file.
 */
private void addStatusHandlers(ContainerCluster cluster, ConfigModelContext configModelContext) {
    if (configModelContext.getDeployState().isHosted()) {
        String name = "status.html";
        Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING));
        cluster.addComponent(
                new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
                                               "http:
    } else {
        cluster.addVipHandler();
    }
}
/**
 * Intended for use by legacy builders only.
 * Will be called during building when using ContainerModelBuilder.
 * Adds both the default handlers and the vip handler in one call.
 */
public static void addDefaultHandler_legacyBuilder(ContainerCluster cluster) {
    addDefaultHandlersExceptStatus(cluster);
    cluster.addVipHandler();
}
/** Adds the built-in root, metric state, application status and statistics handlers. */
private static void addDefaultHandlersExceptStatus(ContainerCluster cluster) {
    cluster.addDefaultRootHandler();
    cluster.addMetricStateHandler();
    cluster.addApplicationStatusHandler();
    cluster.addStatisticsHandler();
}
/** Adds a client provider component for each client child of spec. */
private void addClientProviders(DeployState deployState, Element spec, ContainerCluster cluster) {
    for (Element clientElement : XML.getChildren(spec, "client"))
        cluster.addComponent(new DomClientProviderBuilder().build(deployState, cluster, clientElement));
}
/** Adds a component for each server child of spec. */
private void addServerProviders(DeployState deployState, Element spec, ContainerCluster cluster) {
    addConfiguredComponents(deployState, cluster, spec, "server");
}
/** Adds all legacy filter components found directly under spec. */
private void addLegacyFilters(DeployState deployState, Element spec, ContainerCluster cluster) {
    buildLegacyFilters(deployState, cluster, spec).forEach(cluster::addComponent);
}
/** Builds one component per legacy filter child of spec. */
private List<Component> buildLegacyFilters(DeployState deployState, AbstractConfigProducer ancestor, Element spec) {
    List<Component> filters = new ArrayList<>();
    for (Element filterElement : XML.getChildren(spec, "filter"))
        filters.add(new DomFilterBuilder().build(deployState, ancestor, filterElement));
    return filters;
}
/**
 * Adds access log components from any accesslog elements; when none are configured and the
 * cluster has search, the default search access log is added instead.
 */
private void addAccessLogs(DeployState deployState, ContainerCluster cluster, Element spec) {
    List<Element> accessLogElements = getAccessLogElements(spec);
    for (Element accessLog : accessLogElements) {
        // buildIfNotDisabled returns empty for explicitly disabled access logs
        AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
    }
    if (accessLogElements.isEmpty() && cluster.getSearch() != null)
        cluster.addDefaultSearchAccessLog();
}
/** Returns all accesslog child elements of spec. */
private List<Element> getAccessLogElements(Element spec) {
    return XML.getChildren(spec, "accesslog");
}
/** Configures the cluster's http model from the http element, when present. */
private void addHttp(DeployState deployState, Element spec, ContainerCluster cluster) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement == null) return;
    cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
/** Builds the http model; all servers are stripped when networking is disabled. */
private Http buildHttp(DeployState deployState, ContainerCluster cluster, Element httpElement) {
    Http http = new HttpBuilder().build(deployState, cluster, httpElement);
    if (networking == Networking.disable) {
        http.removeAllServers();
    }
    return http;
}
/** Adds a rest-api model for each rest-api child of spec. */
private void addRestApis(DeployState deployState, Element spec, ContainerCluster cluster) {
    for (Element restApiElement : XML.getChildren(spec, "rest-api"))
        cluster.addRestApi(new RestApiBuilder().build(deployState, cluster, restApiElement));
}
/** Adds a servlet model for each servlet child of spec. */
private void addServlets(DeployState deployState, Element spec, ContainerCluster cluster) {
    for (Element servletElement : XML.getChildren(spec, "servlet")) {
        cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElement));
    }
}
/** Wires the document-api model into the cluster when document-api is configured. */
private void addDocumentApi(Element spec, ContainerCluster cluster) {
    ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/**
 * Wires the docproc model into the cluster when document-processing is configured,
 * and derives the message bus sizing parameters from the docproc options.
 */
private void addDocproc(DeployState deployState, Element spec, ContainerCluster cluster) {
    ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
    if (containerDocproc == null) return;
    cluster.setDocproc(containerDocproc);
    ContainerDocproc.Options docprocOptions = containerDocproc.options;
    cluster.setMbusParams(new ContainerCluster.MbusParams(
            docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/**
 * Wires the search model into the cluster when a search element is configured:
 * chains, the search handler, the GUI handler and any custom renderers.
 */
private void addSearch(DeployState deployState, Element spec, ContainerCluster cluster) {
    Element searchElement = XML.getChild(spec, "search");
    if (searchElement == null) return;
    addIncludes(searchElement);
    cluster.setSearch(buildSearch(deployState, cluster, searchElement));
    addSearchHandler(cluster, searchElement);
    addGUIHandler(cluster);
    // Renderers with reserved ids are rejected before being added
    validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Wires stateless model evaluation into the cluster when model-evaluation is configured. */
private void addModelEvaluation(Element spec, ContainerCluster cluster, ConfigModelContext context) {
    if (XML.getChild(spec, "model-evaluation") == null) return;
    // Use the full model's rank profiles when available, otherwise an empty list
    RankProfileList rankProfiles = context.vespaModel() == null ? RankProfileList.empty
                                                                : context.vespaModel().rankProfileList();
    cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, rankProfiles));
}
/**
 * Wires processing chains into the cluster when a processing element is configured,
 * including server bindings and any custom renderers.
 */
private void addProcessing(DeployState deployState, Element spec, ContainerCluster cluster) {
    Element processingElement = XML.getChild(spec, "processing");
    if (processingElement == null) return;
    addIncludes(processingElement);
    cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
                                serverBindings(processingElement, ProcessingChains.defaultBindings));
    validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Builds the container search model: search chains plus page templates,
 * query profiles and semantic rules from the application package.
 */
private ContainerSearch buildSearch(DeployState deployState, ContainerCluster containerCluster, Element producerSpec) {
    SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);
    ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
    applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
    containerSearch.setQueryProfiles(deployState.getQueryProfiles());
    containerSearch.setSemanticRules(deployState.getSemanticRules());
    return containerSearch;
}
/** Validates the application package's page templates and applies them to the search model. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage, ContainerSearch containerSearch) {
    PageTemplates.validate(applicationPackage);
    containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Adds a handler component for each handler child of spec. */
private void addHandlers(DeployState deployState, ContainerCluster cluster, Element spec) {
    for (Element handlerElement : XML.getChildren(spec, "handler"))
        cluster.addComponent(new DomHandlerBuilder().build(deployState, cluster, handlerElement));
}
/**
 * Verifies the services element version attribute; only version 1.0 is supported.
 *
 * @throws IllegalArgumentException when any other version is given (a RuntimeException
 *         subtype, so existing callers catching RuntimeException are unaffected)
 */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if ( ! Version.fromString(version).equals(new Version(1))) {
        // Bad user input is an IllegalArgumentException, not a bare RuntimeException
        throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
    }
}
/** Populates the cluster with containers: a single standalone node, or nodes from the XML spec. */
private void addNodes(ContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster);
    } else {
        addNodesFromXml(cluster, spec, context);
    }
}
/** Adds the single container used when building a standalone container cluster. */
private void addStandaloneNode(ContainerCluster cluster) {
    Container container = new Container(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa());
    cluster.addContainers(Collections.singleton(container));
}
/** Returns whether the given JVM arguments select a GC algorithm or tune CMS, conflicting with our GC defaults. */
static boolean incompatibleGCOptions(String jvmargs) {
    Pattern gcAlgorithmPattern = Pattern.compile("-XX:[-+]Use.+GC");
    Pattern cmsTuningPattern = Pattern.compile("-XX:[-+]*CMS");
    return gcAlgorithmPattern.matcher(jvmargs).find() || cmsTuningPattern.matcher(jvmargs).find();
}
/**
 * Decides the JVM GC options for a cluster: explicitly configured options win;
 * dev systems use G1; hosted non-prod (and the us-east-3 canary region) use G1;
 * everything else uses CMS.
 *
 * @param jvmGCOptions explicitly configured options, or null when none were given
 */
private static String buildJvmGCOptions(Zone zone, String jvmGCOptions, boolean isHostedVespa) {
    // Parameter renamed from the typo'd 'jvmGCOPtions'
    if (jvmGCOptions != null) {
        return jvmGCOptions;
    } else if (zone.system() == SystemName.dev) {
        return ContainerCluster.G1GC;
    } else if (isHostedVespa) {
        return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region()))
                ? ContainerCluster.G1GC : ContainerCluster.CMS;
    } else {
        return ContainerCluster.CMS;
    }
}
/**
 * Creates the cluster's containers from the nodes element, or a single implicit
 * container when no nodes element is given. Also applies jvm args, GC options,
 * routing aliases, preload, memory percentage and cpu socket affinity.
 */
private void addNodesFromXml(ContainerCluster cluster, Element containerElement, ConfigModelContext context) {
    Element nodesElement = XML.getChild(containerElement, "nodes");
    if (nodesElement == null) {
        // No explicit nodes: create one container on a single allocated host
        Container node = new Container(cluster, "container.0", 0, cluster.isHostedVespa());
        HostResource host = allocateSingleNodeHost(cluster, log, containerElement, context);
        node.setHostResource(host);
        node.initService(context.getDeployLogger());
        cluster.addContainers(Collections.singleton(node));
    } else {
        List<Container> nodes = createNodes(cluster, nodesElement, context);
        applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
        if ( !cluster.getJvmGCOptions().isPresent()) {
            // Fall back to environment-derived defaults when no GC options are set on the nodes tag
            String jvmGCOptions = nodesElement.hasAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
                    ? nodesElement.getAttribute(VespaDomBuilder.JVM_GC_OPTIONS)
                    : null;
            cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState().zone(), jvmGCOptions, context.getDeployState().isHosted()));
        }
        applyRoutingAliasProperties(nodes, cluster);
        applyDefaultPreload(nodes, nodesElement);
        applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
        if (useCpuSocketAffinity(nodesElement))
            AbstractService.distributeCpuSocketAffinity(nodes);
        cluster.addContainers(nodes);
    }
}
/** Creates the cluster's containers from whichever allocation attribute the nodes element carries. */
private List<Container> createNodes(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("count"))     // hosted: allocate by node count
        return createNodesFromNodeCount(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("type"))      // hosted: allocate an entire node type
        return createNodesFromNodeType(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("of"))        // colocate with a content cluster's nodes
        return createNodesFromContentServiceReference(cluster, nodesElement, context);
    return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement); // explicit node list
}
/**
 * Propagates the cluster's service and endpoint aliases to each container
 * as comma-separated properties.
 */
private void applyRoutingAliasProperties(List<Container> result, ContainerCluster cluster) {
    if ( ! cluster.serviceAliases().isEmpty()) {
        // Join once, outside the per-container loop; String.join replaces stream+Collectors.joining
        String serviceAliases = String.join(",", cluster.serviceAliases());
        result.forEach(container -> container.setProp("servicealiases", serviceAliases));
    }
    if ( ! cluster.endpointAliases().isEmpty()) {
        String endpointAliases = String.join(",", cluster.endpointAliases());
        result.forEach(container -> container.setProp("endpointaliases", endpointAliases));
    }
}
/**
 * Parses and applies the allocated-memory attribute (e.g. "60%") of the nodes element.
 *
 * @throws IllegalArgumentException when the value is not an integer percentage ending in '%'
 */
private void applyMemoryPercentage(ContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    // Build the (previously duplicated) error message once
    String errorMessage = "The memory percentage given for nodes in " + cluster +
                          " must be an integer percentage ending by the '%' sign";
    memoryPercentage = memoryPercentage.trim();
    if ( ! memoryPercentage.endsWith("%"))
        throw new IllegalArgumentException(errorMessage);
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
    }
    catch (NumberFormatException e) {
        // Preserve the parse failure as the cause instead of dropping it
        throw new IllegalArgumentException(errorMessage, e);
    }
}
/**
 * Creates a single host when there is no nodes tag.
 * On hosted Vespa the container is colocated with the first content cluster's first host
 * when content clusters exist; otherwise a one-node container cluster is provisioned.
 * Outside hosted Vespa the standard single-node host is used.
 */
private HostResource allocateSingleNodeHost(ContainerCluster cluster, DeployLogger logger, Element containerElement, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    HostSystem hostSystem = cluster.getHostSystem();
    if (deployState.isHosted()) {
        Optional<HostResource> singleContentHost = getHostResourceFromContentClusters(cluster, containerElement, context);
        if (singleContentHost.isPresent()) {
            return singleContentHost.get();
        }
        else {
            ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
                                                          ClusterSpec.Id.from(cluster.getName()),
                                                          deployState.getWantedNodeVespaVersion(),
                                                          false);
            // canFail is the inverse of bootstrap: during bootstrap allocation must not fail
            Capacity capacity = Capacity.fromNodeCount(1,
                                                       Optional.empty(),
                                                       false,
                                                       ! deployState.getProperties().isBootstrap());
            return hostSystem.allocateHosts(clusterSpec, capacity, 1, logger).keySet().iterator().next();
        }
    } else {
        return hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC);
    }
}
/** Provisions hosts from the nodes specification (count attribute) and creates one container per host. */
private List<Container> createNodesFromNodeCount(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().getHostSystem(),
                                                                              ClusterSpec.Type.container,
                                                                              ClusterSpec.Id.from(cluster.getName()),
                                                                              log);
    return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/** Allocates all hosts of the given node type and creates one container per host. */
private List<Container> createNodesFromNodeType(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
                                                  ClusterSpec.Id.from(cluster.getName()),
                                                  context.getDeployState().getWantedNodeVespaVersion(),
                                                  false);
    Map<HostResource, ClusterMembership> hosts =
            cluster.getRoot().getHostSystem().allocateHosts(clusterSpec,
                                                            Capacity.fromRequiredNodeType(type), 1, log);
    return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
 * Colocates this container cluster with the nodes of the content cluster referenced
 * by the 'of' attribute. Fails when the reference does not resolve to a content
 * service with a nodes element.
 */
private List<Container> createNodesFromContentServiceReference(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    String referenceId = nodesElement.getAttribute("of");
    Element services = servicesRootOf(nodesElement).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
    Element referencedService = findChildById(services, referenceId).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
    if ( ! referencedService.getTagName().equals("content"))
        throw new IllegalArgumentException(cluster + " references service '" + referenceId + "', " +
                                           "but that is not a content service");
    Element referencedNodesElement = XML.getChild(referencedService, "nodes");
    if (referencedNodesElement == null)
        throw new IllegalArgumentException(cluster + " references service '" + referenceId + "' to supply nodes, " +
                                           "but that service has no <nodes> element");
    cluster.setHostClusterId(referenceId);
    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(NodesSpecification.from(new ModelElement(referencedNodesElement), context),
                                        referenceId,
                                        cluster.getRoot().getHostSystem(),
                                        context.getDeployLogger());
    return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
}
/**
 * This is used in case we are on hosted Vespa and no nodes tag is supplied:
 * If there are content clusters this will pick the first host in the first cluster as the container node.
 * If there are no content clusters this will return empty (such that the node can be created by the container here).
 */
private Optional<HostResource> getHostResourceFromContentClusters(ContainerCluster cluster, Element containersElement, ConfigModelContext context) {
    Optional<Element> services = servicesRootOf(containersElement);
    if ( ! services.isPresent())
        return Optional.empty();
    List<Element> contentServices = XML.getChildren(services.get(), "content");
    if ( contentServices.isEmpty() ) return Optional.empty();
    Element contentNodesElementOrNull = XML.getChild(contentServices.get(0), "nodes");
    NodesSpecification nodesSpec;
    // A content cluster without a nodes element implies a single non-dedicated node
    if (contentNodesElementOrNull == null)
        nodesSpec = NodesSpecification.nonDedicated(1, context);
    else
        nodesSpec = NodesSpecification.from(new ModelElement(contentNodesElementOrNull), context);
    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(nodesSpec,
                                        contentServices.get(0).getAttribute("id"),
                                        cluster.getRoot().getHostSystem(),
                                        context.getDeployLogger());
    return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the enclosing services element of the given element, or empty if there is none. */
private Optional<Element> servicesRootOf(Element element) {
    // Iterative walk up the DOM tree (equivalent to the recursive formulation)
    Node parent = element.getParentNode();
    while (parent instanceof Element) {
        Element parentElement = (Element) parent;
        if (parentElement.getTagName().equals("services")) return Optional.of(parentElement);
        parent = parentElement.getParentNode();
    }
    return Optional.empty();
}
/** Creates one container service per allocated host, indexed by its cluster membership. */
private List<Container> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ContainerCluster cluster) {
    List<Container> containers = new ArrayList<>();
    for (Map.Entry<HostResource, ClusterMembership> host : hosts.entrySet()) {
        ClusterMembership membership = host.getValue();
        Container container = new Container(cluster, "container." + membership.index(),
                                            membership.retired(), membership.index(), cluster.isHostedVespa());
        container.setHostResource(host.getKey());
        container.initService(deployLogger);
        containers.add(container);
    }
    return containers;
}
/** Creates container services from an explicit list of node elements. */
private List<Container> createNodesFromNodeList(DeployState deployState, ContainerCluster cluster, Element nodesElement) {
    List<Container> containers = new ArrayList<>();
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    for (int index = 0; index < nodeElements.size(); index++) {
        containers.add(new ContainerServiceBuilder("container." + index, index)
                               .build(deployState, cluster, nodeElements.get(index)));
    }
    return containers;
}
/** Returns the exception thrown when a nodes 'of' reference points at an undefined service. */
private IllegalArgumentException clusterReferenceNotFoundException(ContainerCluster cluster, String referenceId) {
    String message = cluster + " references service '" + referenceId + "' but this service is not defined";
    return new IllegalArgumentException(message);
}
/** Returns the first child of parent whose id attribute equals the given id, or empty when none match. */
private Optional<Element> findChildById(Element parent, String id) {
    return XML.getChildren(parent).stream()
              .filter(child -> id.equals(child.getAttribute("id")))
              .findFirst();
}
/** Returns whether cpu socket affinity is requested on the nodes element; defaults to false when absent. */
private boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the nodes-tag jvm args to every container which has no jvm options of its own. */
private void applyNodesTagJvmArgs(List<Container> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Applies the preload attribute of the nodes element, when present, to every container. */
private void applyDefaultPreload(List<Container> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    containers.forEach(container -> container.setPreLoad(preload));
}
// Registers the search handler with either the explicitly configured bindings or the defaults.
// NOTE(review): the default binding string below is truncated in this source view ("http:…) —
// confirm the complete defaultBindings array against the upstream file.
private void addSearchHandler(ContainerCluster cluster, Element searchElement) {
    ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(
            cluster.getSearch().getChains(), "com.yahoo.search.handler.SearchHandler");
    String[] defaultBindings = {"http:
    for (String binding: serverBindings(searchElement, defaultBindings)) {
        searchHandler.addServerBindings(binding);
    }
    cluster.addComponent(searchHandler);
}
// Registers the search GUI handler component and its server binding.
// NOTE(review): the binding string below is truncated in this source view ("http:…) —
// confirm the complete binding against the upstream file before editing this method.
private void addGUIHandler(ContainerCluster cluster) {
    Handler<?> guiHandler = new GUIHandler();
    guiHandler.addServerBindings("http:
    cluster.addComponent(guiHandler);
}
/** Returns the explicit binding values given under the element, or the defaults when none are given. */
private String[] serverBindings(Element searchElement, String... defaultBindings) {
    List<Element> bindingElements = XML.getChildren(searchElement, "binding");
    return bindingElements.isEmpty() ? defaultBindings : toBindingList(bindingElements);
}
/**
 * Extracts the trimmed text content of the given binding elements,
 * skipping elements whose content is empty.
 *
 * @param bindingElements the binding elements to read
 * @return the non-empty binding strings, in document order
 */
private String[] toBindingList(List<Element> bindingElements) {
    // Stream form replaces the manual loop; String[]::new is the idiomatic
    // (and faster on HotSpot) alternative to toArray(new String[size]).
    return bindingElements.stream()
            .map(element -> element.getTextContent().trim())
            .filter(text -> !text.isEmpty())
            .toArray(String[]::new);
}
/** Builds the document-api model from the document-api element, or returns null when the element is absent. */
private ContainerDocumentApi buildDocumentApi(ContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    return new ContainerDocumentApi(cluster, DocumentApiOptionsBuilder.build(documentApiElement));
}
/** Builds the docproc model from the document-processing element, or returns null when the element is absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;
    addIncludes(docprocElement);
    DocprocChains docprocChains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    ContainerDocproc.Options options = DocprocOptionsBuilder.build(docprocElement);
    return new ContainerDocproc(cluster, docprocChains, options, !standaloneBuilder);
}
/** Expands any include directives found directly under the given element, in place. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes == null || includes.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    for (Element include : includes)
        addInclude(parentElement, include);
}
/** Copies every element found under the included directory into the parent element. */
private void addInclude(Element parentElement, Element include) {
    String dir = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dir);
    for (Element file : Xml.allElemsFromPath(app, dir)) {
        for (Element child : XML.getChildren(file)) {
            // importNode(..., true) deep-copies into the target document before appending
            parentElement.appendChild(parentElement.getOwnerDocument().importNode(child, true));
        }
    }
}
/** Adds one configured component to the cluster for each matching child element of spec. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec, String componentName) {
    for (Element componentElement : XML.getChildren(spec, componentName))
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
}
/** Validates each matching child element with the given validator before adding it as a component. */
private static void validateAndAddConfiguredComponents(DeployState deployState, ContainerCluster cluster, Element spec, String componentName, Consumer<Element> elementValidator) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        elementValidator.accept(componentElement); // throws on an invalid element before it is added
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/**
 * Adds an Athenz identity provider component when the deployment spec declares an Athenz domain.
 * Also propagates the identity domain and service to each container as properties.
 * Throws if a domain is declared but no Athenz service is configured for this zone.
 */
private void addIdentityProvider(ContainerCluster cluster,
                                 List<ConfigServerSpec> configServerSpecs,
                                 HostName loadBalancerName,
                                 URI ztsUrl,
                                 String athenzDnsSuffix,
                                 Zone zone,
                                 DeploymentSpec spec) {
    spec.athenzDomain().ifPresent(domain -> {
        AthenzService service = spec.athenzService(zone.environment(), zone.region())
                .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration"));
        // e.g. "<env>-<region>.<athenzDnsSuffix>"
        String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
        IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
        cluster.addComponent(identityProvider);
        cluster.getContainers().forEach(container -> {
            container.setProp("identity.domain", domain.value());
            container.setProp("identity.service", service.value());
        });
    });
}
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
return Optional.ofNullable(loadbalancerName)
.orElseGet(
() -> HostName.from(configServerSpecs.stream()
.findFirst()
.map(ConfigServerSpec::getHostName)
.orElse("unknown")
));
}
/**
* Disallow renderers named "DefaultRenderer" or "JsonRenderer"
*/
private static void validateRendererElement(Element element) {
String idAttr = element.getAttribute("id");
if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) {
throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
}
}
} |
Hvert 30 minutt så kan vi sette dette som info. Foren del applikasjoner er jo dette korrekt ikke sant_ | protected void maintain() {
AtomicInteger failures = new AtomicInteger(0);
AtomicInteger zeroQps = new AtomicInteger(0);
AtomicReference<Exception> lastException = new AtomicReference<>(null);
List<Application> applicationList = applications.asList();
ForkJoinPool pool = new ForkJoinPool(applicationsToUpdateInParallel);
pool.submit(() -> {
applicationList.parallelStream().forEach(application -> {
try {
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.with(controller().metricsService().getApplicationMetrics(application.id()))));
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.withRotationStatus(rotationStatus(application))));
for (Deployment deployment : application.deployments().values()) {
MetricsService.DeploymentMetrics deploymentMetrics = controller().metricsService()
.getDeploymentMetrics(application.id(), deployment.zone());
if (deploymentMetrics.queriesPerSecond() < 0.0001) {
zeroQps.incrementAndGet();
}
DeploymentMetrics newMetrics = new DeploymentMetrics(deploymentMetrics.queriesPerSecond(),
deploymentMetrics.writesPerSecond(),
deploymentMetrics.documentCount(),
deploymentMetrics.queryLatencyMillis(),
deploymentMetrics.writeLatencyMillis());
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.with(deployment.zone(), newMetrics)
.recordActivityAt(controller().clock().instant(), deployment.zone())));
}
} catch (Exception e) {
failures.incrementAndGet();
lastException.set(e);
}
});
});
pool.shutdown();
try {
pool.awaitTermination(30, TimeUnit.MINUTES);
log.log(Level.WARNING, String.format("Number of application with 0 qps: %d/%d", zeroQps.get(), applicationList.size()));
if (lastException.get() != null) {
log.log(Level.WARNING, String.format("Failed to query metrics service for %d/%d applications. Last error: %s. Retrying in %s",
failures.get(),
applicationList.size(),
Exceptions.toMessageString(lastException.get()),
maintenanceInterval()));
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} | log.log(Level.WARNING, String.format("Number of application with 0 qps: %d/%d", zeroQps.get(), applicationList.size())); | protected void maintain() {
AtomicInteger failures = new AtomicInteger(0);
AtomicInteger zeroQps = new AtomicInteger(0);
AtomicReference<Exception> lastException = new AtomicReference<>(null);
List<Application> applicationList = applications.asList();
ForkJoinPool pool = new ForkJoinPool(applicationsToUpdateInParallel);
pool.submit(() -> {
applicationList.parallelStream().forEach(application -> {
try {
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.with(controller().metricsService().getApplicationMetrics(application.id()))));
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.withRotationStatus(rotationStatus(application))));
for (Deployment deployment : application.deployments().values()) {
MetricsService.DeploymentMetrics deploymentMetrics = controller().metricsService()
.getDeploymentMetrics(application.id(), deployment.zone());
if (deploymentMetrics.queriesPerSecond() < 0.0001) {
zeroQps.incrementAndGet();
}
DeploymentMetrics newMetrics = new DeploymentMetrics(deploymentMetrics.queriesPerSecond(),
deploymentMetrics.writesPerSecond(),
deploymentMetrics.documentCount(),
deploymentMetrics.queryLatencyMillis(),
deploymentMetrics.writeLatencyMillis());
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.with(deployment.zone(), newMetrics)
.recordActivityAt(controller().clock().instant(), deployment.zone())));
}
} catch (Exception e) {
failures.incrementAndGet();
lastException.set(e);
}
});
});
pool.shutdown();
try {
pool.awaitTermination(30, TimeUnit.MINUTES);
log.log(Level.INFO, String.format("Number of application with 0 qps: %d/%d", zeroQps.get(), applicationList.size()));
if (lastException.get() != null) {
log.log(Level.WARNING, String.format("Failed to query metrics service for %d/%d applications. Last error: %s. Retrying in %s",
failures.get(),
applicationList.size(),
Exceptions.toMessageString(lastException.get()),
maintenanceInterval()));
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} | class DeploymentMetricsMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DeploymentMetricsMaintainer.class.getName());
private static final int applicationsToUpdateInParallel = 10;
private final ApplicationController applications;
public DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) {
super(controller, duration, jobControl);
this.applications = controller.applications();
}
@Override
/** Get global rotation status for application */
private Map<HostName, RotationStatus> rotationStatus(Application application) {
return applications.rotationRepository().getRotation(application)
.map(rotation -> controller().metricsService().getRotationStatus(rotation.name()))
.map(rotationStatus -> {
Map<HostName, RotationStatus> result = new TreeMap<>();
rotationStatus.forEach((hostname, status) -> result.put(hostname, from(status)));
return result;
})
.orElseGet(Collections::emptyMap);
}
private static RotationStatus from(com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus status) {
switch (status) {
case IN: return RotationStatus.in;
case OUT: return RotationStatus.out;
case UNKNOWN: return RotationStatus.unknown;
default: throw new IllegalArgumentException("Unknown API value for rotation status: " + status);
}
}
} | class DeploymentMetricsMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DeploymentMetricsMaintainer.class.getName());
private static final int applicationsToUpdateInParallel = 10;
private final ApplicationController applications;
public DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) {
super(controller, duration, jobControl);
this.applications = controller.applications();
}
@Override
/** Get global rotation status for application */
private Map<HostName, RotationStatus> rotationStatus(Application application) {
return applications.rotationRepository().getRotation(application)
.map(rotation -> controller().metricsService().getRotationStatus(rotation.name()))
.map(rotationStatus -> {
Map<HostName, RotationStatus> result = new TreeMap<>();
rotationStatus.forEach((hostname, status) -> result.put(hostname, from(status)));
return result;
})
.orElseGet(Collections::emptyMap);
}
private static RotationStatus from(com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus status) {
switch (status) {
case IN: return RotationStatus.in;
case OUT: return RotationStatus.out;
case UNKNOWN: return RotationStatus.unknown;
default: throw new IllegalArgumentException("Unknown API value for rotation status: " + status);
}
}
} |
True, made changes in amended commit. | protected void maintain() {
AtomicInteger failures = new AtomicInteger(0);
AtomicInteger zeroQps = new AtomicInteger(0);
AtomicReference<Exception> lastException = new AtomicReference<>(null);
List<Application> applicationList = applications.asList();
ForkJoinPool pool = new ForkJoinPool(applicationsToUpdateInParallel);
pool.submit(() -> {
applicationList.parallelStream().forEach(application -> {
try {
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.with(controller().metricsService().getApplicationMetrics(application.id()))));
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.withRotationStatus(rotationStatus(application))));
for (Deployment deployment : application.deployments().values()) {
MetricsService.DeploymentMetrics deploymentMetrics = controller().metricsService()
.getDeploymentMetrics(application.id(), deployment.zone());
if (deploymentMetrics.queriesPerSecond() < 0.0001) {
zeroQps.incrementAndGet();
}
DeploymentMetrics newMetrics = new DeploymentMetrics(deploymentMetrics.queriesPerSecond(),
deploymentMetrics.writesPerSecond(),
deploymentMetrics.documentCount(),
deploymentMetrics.queryLatencyMillis(),
deploymentMetrics.writeLatencyMillis());
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.with(deployment.zone(), newMetrics)
.recordActivityAt(controller().clock().instant(), deployment.zone())));
}
} catch (Exception e) {
failures.incrementAndGet();
lastException.set(e);
}
});
});
pool.shutdown();
try {
pool.awaitTermination(30, TimeUnit.MINUTES);
log.log(Level.WARNING, String.format("Number of application with 0 qps: %d/%d", zeroQps.get(), applicationList.size()));
if (lastException.get() != null) {
log.log(Level.WARNING, String.format("Failed to query metrics service for %d/%d applications. Last error: %s. Retrying in %s",
failures.get(),
applicationList.size(),
Exceptions.toMessageString(lastException.get()),
maintenanceInterval()));
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} | log.log(Level.WARNING, String.format("Number of application with 0 qps: %d/%d", zeroQps.get(), applicationList.size())); | protected void maintain() {
AtomicInteger failures = new AtomicInteger(0);
AtomicInteger zeroQps = new AtomicInteger(0);
AtomicReference<Exception> lastException = new AtomicReference<>(null);
List<Application> applicationList = applications.asList();
ForkJoinPool pool = new ForkJoinPool(applicationsToUpdateInParallel);
pool.submit(() -> {
applicationList.parallelStream().forEach(application -> {
try {
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.with(controller().metricsService().getApplicationMetrics(application.id()))));
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.withRotationStatus(rotationStatus(application))));
for (Deployment deployment : application.deployments().values()) {
MetricsService.DeploymentMetrics deploymentMetrics = controller().metricsService()
.getDeploymentMetrics(application.id(), deployment.zone());
if (deploymentMetrics.queriesPerSecond() < 0.0001) {
zeroQps.incrementAndGet();
}
DeploymentMetrics newMetrics = new DeploymentMetrics(deploymentMetrics.queriesPerSecond(),
deploymentMetrics.writesPerSecond(),
deploymentMetrics.documentCount(),
deploymentMetrics.queryLatencyMillis(),
deploymentMetrics.writeLatencyMillis());
applications.lockIfPresent(application.id(), locked ->
applications.store(locked.with(deployment.zone(), newMetrics)
.recordActivityAt(controller().clock().instant(), deployment.zone())));
}
} catch (Exception e) {
failures.incrementAndGet();
lastException.set(e);
}
});
});
pool.shutdown();
try {
pool.awaitTermination(30, TimeUnit.MINUTES);
log.log(Level.INFO, String.format("Number of application with 0 qps: %d/%d", zeroQps.get(), applicationList.size()));
if (lastException.get() != null) {
log.log(Level.WARNING, String.format("Failed to query metrics service for %d/%d applications. Last error: %s. Retrying in %s",
failures.get(),
applicationList.size(),
Exceptions.toMessageString(lastException.get()),
maintenanceInterval()));
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} | class DeploymentMetricsMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DeploymentMetricsMaintainer.class.getName());
private static final int applicationsToUpdateInParallel = 10;
private final ApplicationController applications;
public DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) {
super(controller, duration, jobControl);
this.applications = controller.applications();
}
@Override
/** Get global rotation status for application */
private Map<HostName, RotationStatus> rotationStatus(Application application) {
return applications.rotationRepository().getRotation(application)
.map(rotation -> controller().metricsService().getRotationStatus(rotation.name()))
.map(rotationStatus -> {
Map<HostName, RotationStatus> result = new TreeMap<>();
rotationStatus.forEach((hostname, status) -> result.put(hostname, from(status)));
return result;
})
.orElseGet(Collections::emptyMap);
}
private static RotationStatus from(com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus status) {
switch (status) {
case IN: return RotationStatus.in;
case OUT: return RotationStatus.out;
case UNKNOWN: return RotationStatus.unknown;
default: throw new IllegalArgumentException("Unknown API value for rotation status: " + status);
}
}
} | class DeploymentMetricsMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DeploymentMetricsMaintainer.class.getName());
private static final int applicationsToUpdateInParallel = 10;
private final ApplicationController applications;
public DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) {
super(controller, duration, jobControl);
this.applications = controller.applications();
}
@Override
/** Get global rotation status for application */
private Map<HostName, RotationStatus> rotationStatus(Application application) {
return applications.rotationRepository().getRotation(application)
.map(rotation -> controller().metricsService().getRotationStatus(rotation.name()))
.map(rotationStatus -> {
Map<HostName, RotationStatus> result = new TreeMap<>();
rotationStatus.forEach((hostname, status) -> result.put(hostname, from(status)));
return result;
})
.orElseGet(Collections::emptyMap);
}
private static RotationStatus from(com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus status) {
switch (status) {
case IN: return RotationStatus.in;
case OUT: return RotationStatus.out;
case UNKNOWN: return RotationStatus.unknown;
default: throw new IllegalArgumentException("Unknown API value for rotation status: " + status);
}
}
} |
This log should remove | public void handleRequest(BaseRequest request) throws Exception {
BaseResponse response = new BaseResponse();
LOG.debug("receive http request. url=", request.getRequest().uri());
execute(request, response);
} | LOG.debug("receive http request. url=", request.getRequest().uri()); | public void handleRequest(BaseRequest request) throws Exception {
BaseResponse response = new BaseResponse();
LOG.debug("receive http request. url={}", request.getRequest().uri());
execute(request, response);
} | class BaseAction implements IAction {
private static final Logger LOG = LogManager.getLogger(BaseAction.class);
protected QeService qeService = null;
protected ActionController controller;
protected Catalog catalog;
public BaseAction(ActionController controller) {
this.controller = controller;
this.catalog = Catalog.getInstance();
}
public QeService getQeService() {
return qeService;
}
public void setQeService(QeService qeService) {
this.qeService = qeService;
}
@Override
public abstract void execute(BaseRequest request, BaseResponse response) throws DdlException;
protected void writeResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status) {
FullHttpResponse responseObj = null;
try {
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes("UTF-8")));
} catch (UnsupportedEncodingException e) {
LOG.warn("get exception.", e);
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes()));
}
Preconditions.checkNotNull(responseObj);
HttpMethod method = request.getRequest().method();
checkDefaultContentTypeHeader(response, responseObj);
if (!method.equals(HttpMethod.HEAD)) {
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH,
String.valueOf(responseObj.content().readableBytes()));
}
writeCustomHeaders(response, responseObj);
writeCookies(response, responseObj);
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
request.getContext().write(responseObj).addListener(ChannelFutureListener.CLOSE);
} else {
responseObj.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
request.getContext().write(responseObj);
}
}
protected void writeFileResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status,
File resFile) {
HttpResponse responseObj = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
if (HttpHeaders.isKeepAlive(request.getRequest())) {
response.updateHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
}
ChannelFuture sendFileFuture;
ChannelFuture lastContentFuture;
RandomAccessFile rafFile;
try {
rafFile = new RandomAccessFile(resFile, "r");
long fileLength = 0;
fileLength = rafFile.length();
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(fileLength));
writeCookies(response, responseObj);
writeCustomHeaders(response, responseObj);
request.getContext().write(responseObj);
if (request.getContext().pipeline().get(SslHandler.class) == null) {
sendFileFuture = request.getContext().write(new DefaultFileRegion(rafFile.getChannel(), 0, fileLength),
request.getContext().newProgressivePromise());
lastContentFuture = request.getContext().writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
} else {
sendFileFuture = request.getContext().writeAndFlush(
new HttpChunkedInput(new ChunkedFile(rafFile, 0, fileLength, 8192)),
request.getContext().newProgressivePromise());
lastContentFuture = sendFileFuture;
}
} catch (FileNotFoundException ignore) {
writeResponse(request, response, HttpResponseStatus.NOT_FOUND);
return;
} catch (IOException e1) {
writeResponse(request, response, HttpResponseStatus.INTERNAL_SERVER_ERROR);
return;
}
sendFileFuture.addListener(new ChannelProgressiveFutureListener() {
@Override
public void operationProgressed(ChannelProgressiveFuture future, long progress, long total) {
if (total < 0) {
LOG.debug("{} Transfer progress: {}", future.channel(), progress);
} else {
LOG.debug("{} Transfer progress: {} / {}", future.channel(), progress, total);
}
}
@Override
public void operationComplete(ChannelProgressiveFuture future) {
LOG.debug("{} Transfer complete.", future.channel());
if (!future.isSuccess()) {
Throwable cause = future.cause();
LOG.error("something wrong. ", cause);
}
}
});
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
lastContentFuture.addListener(ChannelFutureListener.CLOSE);
}
}
protected void checkDefaultContentTypeHeader(BaseResponse response, Object responseOj) {
List<String> header = response.getCustomHeaders().get(HttpHeaders.Names.CONTENT_TYPE);
if (header == null) {
response.updateHeader(HttpHeaders.Names.CONTENT_TYPE, "text/html");
}
}
protected void writeCustomHeaders(BaseResponse response, HttpResponse responseObj) {
for (Map.Entry<String, List<String>> entry : response.getHeaders().entrySet()) {
responseObj.headers().add(entry.getKey(), entry.getValue());
}
}
protected void writeCookies(BaseResponse response, HttpResponse responseObj) {
for (Cookie cookie : response.getCookies()) {
responseObj.headers().add(HttpHeaders.Names.SET_COOKIE, ServerCookieEncoder.encode(cookie));
}
}
public static class AuthorizationInfo {
public String fullUserName;
public String password;
public String cluster;
}
public boolean parseAuth(BaseRequest request, AuthorizationInfo authInfo) {
String encodedAuthString = request.getAuthorizationHeader();
if (Strings.isNullOrEmpty(encodedAuthString)) {
return false;
}
String[] parts = encodedAuthString.split(" ");
if (parts.length != 2) {
return false;
}
encodedAuthString = parts[1];
ByteBuf buf = null;
try {
buf = Unpooled.copiedBuffer(ByteBuffer.wrap(encodedAuthString.getBytes()));
String authString = Base64.decode(buf).toString(CharsetUtil.UTF_8);
int index = authString.indexOf(":");
authInfo.fullUserName = authString.substring(0, index);
final String[] elements = authInfo.fullUserName.split("@");
if (elements != null && elements.length < 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER,
authInfo.fullUserName);
authInfo.cluster = SystemInfoService.DEFAULT_CLUSTER;
} else if (elements != null && elements.length == 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(elements[1], elements[0]);
authInfo.cluster = elements[1];
}
authInfo.password = authString.substring(index + 1);
} finally {
if (buf != null) {
buf.release();
}
}
return true;
}
private AuthorizationInfo checkAndGetUser(BaseRequest request)
throws UnauthorizedException {
AuthorizationInfo authInfo = new AuthorizationInfo();
if (!parseAuth(request, authInfo)) {
throw new UnauthorizedException("Need auth information.");
}
byte[] hashedPasswd = catalog.getUserMgr().getPassword(authInfo.fullUserName);
if (hashedPasswd == null) {
throw new UnauthorizedException("No such user(" + authInfo.fullUserName + ")");
}
if (!MysqlPassword.checkPlainPass(hashedPasswd, authInfo.password)) {
throw new UnauthorizedException("Password error");
}
return authInfo;
}
protected void checkAdmin(BaseRequest request) throws UnauthorizedException {
final AuthorizationInfo authInfo = checkAndGetUser(request);
if (!catalog.getUserMgr().isAdmin(authInfo.fullUserName)) {
throw new UnauthorizedException("Administrator needed");
}
}
protected void checkReadPriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_ONLY)) {
throw new UnauthorizedException("Read Privilege needed");
}
}
protected void checkWritePriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_WRITE)) {
throw new UnauthorizedException("Write Privilege needed");
}
}
public AuthorizationInfo getAuthorizationInfo(BaseRequest request)
throws UnauthorizedException {
return checkAndGetUser(request);
}
protected void writeAuthResponse(BaseRequest request, BaseResponse response) {
response.addHeader(HttpHeaders.Names.WWW_AUTHENTICATE, "Basic realm=\"\"");
writeResponse(request, response, HttpResponseStatus.UNAUTHORIZED);
}
} | class BaseAction implements IAction {
private static final Logger LOG = LogManager.getLogger(BaseAction.class);
protected QeService qeService = null;
protected ActionController controller;
protected Catalog catalog;
public BaseAction(ActionController controller) {
this.controller = controller;
this.catalog = Catalog.getInstance();
}
public QeService getQeService() {
return qeService;
}
public void setQeService(QeService qeService) {
this.qeService = qeService;
}
@Override
public abstract void execute(BaseRequest request, BaseResponse response) throws DdlException;
protected void writeResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status) {
FullHttpResponse responseObj = null;
try {
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes("UTF-8")));
} catch (UnsupportedEncodingException e) {
LOG.warn("get exception.", e);
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes()));
}
Preconditions.checkNotNull(responseObj);
HttpMethod method = request.getRequest().method();
checkDefaultContentTypeHeader(response, responseObj);
if (!method.equals(HttpMethod.HEAD)) {
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH,
String.valueOf(responseObj.content().readableBytes()));
}
writeCustomHeaders(response, responseObj);
writeCookies(response, responseObj);
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
request.getContext().write(responseObj).addListener(ChannelFutureListener.CLOSE);
} else {
responseObj.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
request.getContext().write(responseObj);
}
}
protected void writeFileResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status,
File resFile) {
HttpResponse responseObj = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
if (HttpHeaders.isKeepAlive(request.getRequest())) {
response.updateHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
}
ChannelFuture sendFileFuture;
ChannelFuture lastContentFuture;
RandomAccessFile rafFile;
try {
rafFile = new RandomAccessFile(resFile, "r");
long fileLength = 0;
fileLength = rafFile.length();
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(fileLength));
writeCookies(response, responseObj);
writeCustomHeaders(response, responseObj);
request.getContext().write(responseObj);
if (request.getContext().pipeline().get(SslHandler.class) == null) {
sendFileFuture = request.getContext().write(new DefaultFileRegion(rafFile.getChannel(), 0, fileLength),
request.getContext().newProgressivePromise());
lastContentFuture = request.getContext().writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
} else {
sendFileFuture = request.getContext().writeAndFlush(
new HttpChunkedInput(new ChunkedFile(rafFile, 0, fileLength, 8192)),
request.getContext().newProgressivePromise());
lastContentFuture = sendFileFuture;
}
} catch (FileNotFoundException ignore) {
writeResponse(request, response, HttpResponseStatus.NOT_FOUND);
return;
} catch (IOException e1) {
writeResponse(request, response, HttpResponseStatus.INTERNAL_SERVER_ERROR);
return;
}
sendFileFuture.addListener(new ChannelProgressiveFutureListener() {
@Override
public void operationProgressed(ChannelProgressiveFuture future, long progress, long total) {
if (total < 0) {
LOG.debug("{} Transfer progress: {}", future.channel(), progress);
} else {
LOG.debug("{} Transfer progress: {} / {}", future.channel(), progress, total);
}
}
@Override
public void operationComplete(ChannelProgressiveFuture future) {
LOG.debug("{} Transfer complete.", future.channel());
if (!future.isSuccess()) {
Throwable cause = future.cause();
LOG.error("something wrong. ", cause);
}
}
});
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
lastContentFuture.addListener(ChannelFutureListener.CLOSE);
}
}
protected void checkDefaultContentTypeHeader(BaseResponse response, Object responseOj) {
List<String> header = response.getCustomHeaders().get(HttpHeaders.Names.CONTENT_TYPE);
if (header == null) {
response.updateHeader(HttpHeaders.Names.CONTENT_TYPE, "text/html");
}
}
protected void writeCustomHeaders(BaseResponse response, HttpResponse responseObj) {
for (Map.Entry<String, List<String>> entry : response.getHeaders().entrySet()) {
responseObj.headers().add(entry.getKey(), entry.getValue());
}
}
protected void writeCookies(BaseResponse response, HttpResponse responseObj) {
for (Cookie cookie : response.getCookies()) {
responseObj.headers().add(HttpHeaders.Names.SET_COOKIE, ServerCookieEncoder.encode(cookie));
}
}
public static class AuthorizationInfo {
public String fullUserName;
public String password;
public String cluster;
}
public boolean parseAuth(BaseRequest request, AuthorizationInfo authInfo) {
String encodedAuthString = request.getAuthorizationHeader();
if (Strings.isNullOrEmpty(encodedAuthString)) {
return false;
}
String[] parts = encodedAuthString.split(" ");
if (parts.length != 2) {
return false;
}
encodedAuthString = parts[1];
ByteBuf buf = null;
try {
buf = Unpooled.copiedBuffer(ByteBuffer.wrap(encodedAuthString.getBytes()));
String authString = Base64.decode(buf).toString(CharsetUtil.UTF_8);
int index = authString.indexOf(":");
authInfo.fullUserName = authString.substring(0, index);
final String[] elements = authInfo.fullUserName.split("@");
if (elements != null && elements.length < 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER,
authInfo.fullUserName);
authInfo.cluster = SystemInfoService.DEFAULT_CLUSTER;
} else if (elements != null && elements.length == 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(elements[1], elements[0]);
authInfo.cluster = elements[1];
}
authInfo.password = authString.substring(index + 1);
} finally {
if (buf != null) {
buf.release();
}
}
return true;
}
private AuthorizationInfo checkAndGetUser(BaseRequest request)
throws UnauthorizedException {
AuthorizationInfo authInfo = new AuthorizationInfo();
if (!parseAuth(request, authInfo)) {
throw new UnauthorizedException("Need auth information.");
}
byte[] hashedPasswd = catalog.getUserMgr().getPassword(authInfo.fullUserName);
if (hashedPasswd == null) {
throw new UnauthorizedException("No such user(" + authInfo.fullUserName + ")");
}
if (!MysqlPassword.checkPlainPass(hashedPasswd, authInfo.password)) {
throw new UnauthorizedException("Password error");
}
return authInfo;
}
protected void checkAdmin(BaseRequest request) throws UnauthorizedException {
final AuthorizationInfo authInfo = checkAndGetUser(request);
if (!catalog.getUserMgr().isAdmin(authInfo.fullUserName)) {
throw new UnauthorizedException("Administrator needed");
}
}
protected void checkReadPriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_ONLY)) {
throw new UnauthorizedException("Read Privilege needed");
}
}
protected void checkWritePriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_WRITE)) {
throw new UnauthorizedException("Write Privilege needed");
}
}
public AuthorizationInfo getAuthorizationInfo(BaseRequest request)
throws UnauthorizedException {
return checkAndGetUser(request);
}
protected void writeAuthResponse(BaseRequest request, BaseResponse response) {
response.addHeader(HttpHeaders.Names.WWW_AUTHENTICATE, "Basic realm=\"\"");
writeResponse(request, response, HttpResponseStatus.UNAUTHORIZED);
}
} |
Update `equals`, `hashCode` and `toString` in `TransportSecurityOptions` as well? | public void can_read_options_from_json() throws IOException {
String tlsJson = new String(Files.readAllBytes(TEST_CONFIG_FILE), StandardCharsets.UTF_8);
TransportSecurityOptions expectedOptions = new TransportSecurityOptions("myhost.key", "certs.pem", "my_cas.pem", CIPHERS);
TransportSecurityOptions actualOptions = TransportSecurityOptions.fromJson(tlsJson);
assertEquals(expectedOptions, actualOptions);
} | TransportSecurityOptions expectedOptions = new TransportSecurityOptions("myhost.key", "certs.pem", "my_cas.pem", CIPHERS); | public void can_read_options_from_json() throws IOException {
String tlsJson = new String(Files.readAllBytes(TEST_CONFIG_FILE), StandardCharsets.UTF_8);
TransportSecurityOptions expectedOptions = new TransportSecurityOptions("myhost.key", "certs.pem", "my_cas.pem", CIPHERS);
TransportSecurityOptions actualOptions = TransportSecurityOptions.fromJson(tlsJson);
assertEquals(expectedOptions, actualOptions);
} | class TransportSecurityOptionsTest {
private static final List<String> CIPHERS = Collections.singletonList("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256");
private static final Path TEST_CONFIG_FILE = Paths.get("src/test/resources/transport-security-options.json");
@Test
public void can_read_options_from_json_file() {
TransportSecurityOptions expectedOptions = new TransportSecurityOptions("myhost.key", "certs.pem", "my_cas.pem", CIPHERS);
TransportSecurityOptions actualOptions = TransportSecurityOptions.fromJsonFile(TEST_CONFIG_FILE);
assertEquals(expectedOptions, actualOptions);
}
@Test
} | class TransportSecurityOptionsTest {
private static final List<String> CIPHERS = Collections.singletonList("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256");
private static final Path TEST_CONFIG_FILE = Paths.get("src/test/resources/transport-security-options.json");
@Test
public void can_read_options_from_json_file() {
TransportSecurityOptions expectedOptions = new TransportSecurityOptions("myhost.key", "certs.pem", "my_cas.pem", CIPHERS);
TransportSecurityOptions actualOptions = TransportSecurityOptions.fromJsonFile(TEST_CONFIG_FILE);
assertEquals(expectedOptions, actualOptions);
}
@Test
} |
These two ifs are for initializing the generations? Seems to me that other than the first time, we should never set the current generations from the node repo. | void converge() {
final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
return;
}
final NodeSpec node = optionalNode.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
expectNodeNotInNodeRepo = false;
Optional<Container> container = getContainer();
if (!node.equals(lastNode)) {
logChangesToNodeSpec(lastNode, node);
if (currentRebootGeneration < node.getCurrentRebootGeneration())
currentRebootGeneration = node.getCurrentRebootGeneration();
if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent())
currentRestartGeneration = node.getCurrentRestartGeneration();
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context, node);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, node, container);
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node);
if (isDownloadingImage()) {
context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(node, container);
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
if (! container.isPresent()) {
containerState = STARTING;
startContainer(node);
containerState = UNKNOWN;
aclMaintainer.ifPresent(AclMaintainer::converge);
}
startServicesIfNeeded();
resumeNodeIfNeeded(node);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
updateNodeRepoWithCurrentAttributes(node);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case provisioned:
nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
break;
case dirty:
removeContainerIfNeededUpdateContainerState(node, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(node);
nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
expectNodeNotInNodeRepo = true;
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
} | if (currentRebootGeneration < node.getCurrentRebootGeneration()) | void converge() {
final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
return;
}
final NodeSpec node = optionalNode.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
expectNodeNotInNodeRepo = false;
Optional<Container> container = getContainer();
if (!node.equals(lastNode)) {
logChangesToNodeSpec(lastNode, node);
if (lastNode == null) {
currentRebootGeneration = node.getCurrentRebootGeneration();
currentRestartGeneration = node.getCurrentRestartGeneration();
}
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context, node);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, node, container);
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node);
if (isDownloadingImage()) {
context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(node, container);
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
if (! container.isPresent()) {
containerState = STARTING;
startContainer(node);
containerState = UNKNOWN;
aclMaintainer.ifPresent(AclMaintainer::converge);
}
startServicesIfNeeded();
resumeNodeIfNeeded(node);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
updateNodeRepoWithCurrentAttributes(node);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case provisioned:
nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
break;
case dirty:
removeContainerIfNeededUpdateContainerState(node, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(node);
nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
expectNodeNotInNodeRepo = true;
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final Object monitor = new Object();
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContext context;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private Instant lastConverge;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final NodeAgentContext context,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final Clock clock,
final Duration timeBetweenEachConverge,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.context = context;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.clock = clock;
this.timeBetweenEachConverge = timeBetweenEachConverge;
this.lastConverge = clock.instant();
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
this.loopThread = new Thread(() -> {
try {
while (!terminated.get()) tick();
} catch (Throwable t) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
}
});
this.loopThread.setName("tick-" + context.hostname());
this.serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
@Override
public void start() {
context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
loopThread.start();
}
@Override
public void stop() {
filebeatRestarter.shutdown();
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
context.log(logger, LogLevel.ERROR,
"Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
}
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
context.log(logger, "Stopped");
}
void startServicesIfNeeded() {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
void resumeNodeIfNeeded(NodeSpec node) {
if (!hasResumedNode) {
if (!currentFilebeatRestarter.isPresent()) {
storageMaintainer.writeMetricsConfig(context, node);
currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
}
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
if (!Objects.equals(node.getCurrentRestartGeneration(), currentRestartGeneration)) {
currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
if (!Objects.equals(node.getCurrentRebootGeneration(), currentRebootGeneration)) {
currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
}
publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
if (!currentAttributes.equals(newAttributes)) {
context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
currentAttributes, newAttributes);
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
}
private void startContainer(NodeSpec node) {
ContainerData containerData = createContainerData(context, node);
dockerOperations.createContainer(context, node, containerData);
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(node, container))
.map(container -> {
shouldRestartServices(node).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(node, container);
currentRestartGeneration = node.getWantedRestartGeneration();
});
return container;
});
}
private Optional<String> shouldRestartServices(NodeSpec node) {
if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
}
return Optional.empty();
}
private void restartServices(NodeSpec node, Container existingContainer) {
if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
context.log(logger, "Restarting services");
orchestratorSuspendNode();
dockerOperations.restartVespa(context);
}
}
@Override
public void stopServices() {
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
@Override
public void suspend() {
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final Node.State nodeState = node.getState();
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
ContainerResources wantedContainerResources = ContainerResources.from(
node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
if (!wantedContainerResources.equals(existingContainer.resources)) {
return Optional.of("Container should be running with different resource allocation, wanted: " +
wantedContainerResources + ", actual: " + existingContainer.resources);
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
if (removeReason.isPresent()) {
context.log(logger, "Will remove container: " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (node.getState() == Node.State.active) {
orchestratorSuspendNode();
}
try {
if (node.getState() != Node.State.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
stopFilebeatSchedulerIfNeeded();
storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
currentRebootGeneration = node.getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
private void scheduleDownLoadIfNeeded(NodeSpec node) {
if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
imageBeingDownloaded = node.getWantedDockerImage().get();
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = timeBetweenEachConverge
.minus(Duration.between(lastConverge, clock.instant()))
.toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Exception e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
}
}
}
private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
StringBuilder builder = new StringBuilder();
appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
if (builder.length() > 0) {
context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
}
}
private static <T> String fieldDescription(T value) {
return value == null ? "[absent]" : value.toString();
}
private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
T oldValue = oldNode == null ? null : getter.apply(oldNode);
T newValue = getter.apply(newNode);
if (!Objects.equals(oldValue, newValue)) {
if (builder.length() > 0) {
builder.append(", ");
}
builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
}
}
private void stopFilebeatSchedulerIfNeeded() {
if (currentFilebeatRestarter.isPresent()) {
currentFilebeatRestarter.get().cancel(true);
currentFilebeatRestarter = Optional.empty();
}
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
final NodeSpec node = lastNode;
if (node == null || containerState != UNKNOWN) return;
Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
.add("state", node.getState().toString());
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.getAllowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
}
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
String wrappedMetrics = "s:" + params.toString();
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
}
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> container = dockerOperations.getContainer(context);
if (! container.isPresent()) containerState = ABSENT;
return container;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
class CpuUsageReporter {
private long containerKernelUsage = 0;
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private long deltaContainerKernelUsage;
private long deltaContainerUsage;
private long deltaSystemUsage;
private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
this.totalSystemUsage = totalSystemUsage;
this.totalContainerUsage = totalContainerUsage;
this.containerKernelUsage = containerKernelUsage;
}
/**
* Returns the CPU usage ratio for the docker container that this NodeAgent is managing
* in the time between the last two times updateCpuDeltas() was called. This is calculated
* by dividing the CPU time used by the container with the CPU time used by the entire system.
*/
double getCpuUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
}
double getCpuKernelUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
}
}
private void orchestratorSuspendNode() {
context.log(logger, "Ask Orchestrator for permission to suspend node");
orchestrator.suspend(context.hostname().value());
}
protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
return (pathInContainer, data) -> {
throw new UnsupportedOperationException("addFile not implemented");
};
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final Object monitor = new Object();
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContext context;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private Instant lastConverge;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final NodeAgentContext context,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final Clock clock,
final Duration timeBetweenEachConverge,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.context = context;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.clock = clock;
this.timeBetweenEachConverge = timeBetweenEachConverge;
this.lastConverge = clock.instant();
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
this.loopThread = new Thread(() -> {
try {
while (!terminated.get()) tick();
} catch (Throwable t) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
}
});
this.loopThread.setName("tick-" + context.hostname());
this.serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
// Starts the converge loop thread created in the constructor.
@Override
public void start() {
context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
loopThread.start();
}
// Shuts down the agent: stops the filebeat scheduler, terminates the tick loop,
// and blocks until both are actually finished. May only be called once.
@Override
public void stop() {
filebeatRestarter.shutdown();
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
// Retry the join until both the tick thread and the scheduler are down.
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
// NOTE(review): the interrupt is swallowed and the wait retried; the thread's
// interrupt status is not restored — confirm this is intentional.
context.log(logger, LogLevel.ERROR,
"Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
}
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
context.log(logger, "Stopped");
}
// Starts node services inside the container unless they were already started.
// Idempotent: only the first call after services were stopped does anything.
void startServicesIfNeeded() {
    if (hasStartedServices) return;
    context.log(logger, "Starting services");
    dockerOperations.startServices(context);
    hasStartedServices = true;
}
// Runs the optional node "resume" command once after a (re)start, and lazily installs
// the daily filebeat-restart task the first time around.
void resumeNodeIfNeeded(NodeSpec node) {
if (!hasResumedNode) {
if (!currentFilebeatRestarter.isPresent()) {
storageMaintainer.writeMetricsConfig(context, node);
// Restart filebeat once a day to pick up rotated configuration/logs.
currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
}
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
// Builds the set of node attributes that differ between the node repo's view and this
// agent's local view, and publishes them if anything changed.
private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
// Only report restart generation when one is actually wanted.
if (node.getWantedRestartGeneration().isPresent() &&
!Objects.equals(node.getCurrentRestartGeneration(), currentRestartGeneration)) {
currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
if (!Objects.equals(node.getCurrentRebootGeneration(), currentRebootGeneration)) {
currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
// Only report the wanted docker image as current while the container may be running (UNKNOWN).
Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
}
publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
}
// Pushes the new attributes to the node repository, but only when they differ
// from what the repository already has.
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    if (currentAttributes.equals(newAttributes)) return; // nothing changed, skip the call
    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
            currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
// Creates and starts the container, then resets the per-container bookkeeping
// (fresh CPU sampler, services considered started, resume still pending).
private void startContainer(NodeSpec node) {
ContainerData containerData = createContainerData(context, node);
dockerOperations.createContainer(context, node, containerData);
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
// Removes the container if required; if it survives, restarts its services when the
// wanted restart generation has been bumped. Returns the container if it still exists.
private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(node, container))
.map(container -> {
shouldRestartServices(node).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(node, container);
// Record that we have satisfied the wanted restart generation.
currentRestartGeneration = node.getWantedRestartGeneration();
});
return container;
});
}
/**
 * Returns a human-readable reason to restart services if the wanted restart
 * generation is ahead of the one this agent has acted on, otherwise empty.
 *
 * Fix: the original called {@code currentRestartGeneration.get()} guarded only by
 * the presence of the *wanted* generation; if the current generation was absent it
 * threw NoSuchElementException. An absent current generation is now treated as
 * "behind" and triggers a restart instead of throwing.
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
    long wanted = node.getWantedRestartGeneration().get();
    if (!currentRestartGeneration.isPresent() || currentRestartGeneration.get() < wanted) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.orElse(0L) + " -> " + wanted);
    }
    return Optional.empty();
}
// Restarts Vespa inside a running container of an active node, after asking the
// Orchestrator for permission to suspend.
private void restartServices(NodeSpec node, Container existingContainer) {
if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
context.log(logger, "Restarting services");
orchestratorSuspendNode();
dockerOperations.restartVespa(context);
}
}
// Stops services inside the container; a no-op when the container is known absent.
// If docker reports the container gone mid-call, just remember that it is absent.
@Override
public void stopServices() {
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
// Suspends services on the node; best-effort — failures other than a missing
// container are logged and swallowed. No-op when the container is known absent.
@Override
public void suspend() {
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
// Container disappeared underneath us — record its absence.
containerState = ABSENT;
} catch (RuntimeException e) {
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
// Decides whether the existing container must be removed, returning the reason if so.
// Checks, in order: node state, image change, container not running, resource change,
// pending reboot, and a failed start attempt.
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final Node.State nodeState = node.getState();
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
ContainerResources wantedContainerResources = ContainerResources.from(
node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
if (!wantedContainerResources.equals(existingContainer.resources)) {
return Optional.of("Container should be running with different resource allocation, wanted: " +
wantedContainerResources + ", actual: " + existingContainer.resources);
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
// STARTING here means a previous start attempt never completed.
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
// Removes the container when shouldRemoveContainer() says so: suspend via Orchestrator
// (active nodes only), stop services best-effort, cancel the filebeat task, hand off
// core dumps, remove the container, and record the reboot generation as satisfied.
// Returns the container only if it was kept.
private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
if (removeReason.isPresent()) {
context.log(logger, "Will remove container: " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (node.getState() == Node.State.active) {
orchestratorSuspendNode();
}
try {
// Dirty nodes are being wiped anyway — no need to suspend first.
if (node.getState() != Node.State.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
stopFilebeatSchedulerIfNeeded();
storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
currentRebootGeneration = node.getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
/**
 * Kicks off an async pull of the wanted docker image when it differs from the
 * current one, tracking the in-flight image in {@code imageBeingDownloaded}.
 *
 * Fix: the original called {@code node.getWantedDockerImage().get()} without a
 * presence check; when the wanted image was absent but the current image was set,
 * this threw NoSuchElementException. An absent wanted image is now a no-op.
 */
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
    if (!node.getWantedDockerImage().isPresent()) return; // nothing to download
    if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
        imageBeingDownloaded = node.getWantedDockerImage().get();
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null; // pull finished (or was unnecessary)
    }
}
// Wakes the tick thread immediately instead of letting it sleep out its interval.
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
monitor.notifyAll();
}
}
}
// One iteration of the agent loop: sleep until the next converge interval (or until
// signalled), latch the frozen flag, then converge unless frozen. Expected exception
// types are logged quietly; anything else is counted as unhandled.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
// Wait out the remainder of the converge interval, unless work was signalled.
while (!workToDoNow) {
long remainder = timeBetweenEachConverge
.minus(Duration.between(lastConverge, clock.instant()))
.toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
// Copy under the lock; used outside the synchronized block below.
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException | ConvergenceException e) {
// Expected, transient conditions — log message only.
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Exception e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
}
}
}
// Logs a diff between the previous and new node spec (currently only the state field).
private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
StringBuilder builder = new StringBuilder();
appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
if (builder.length() > 0) {
context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
}
}
// Renders a field value for diff logging; a missing (null) value gets an explicit placeholder.
private static <T> String fieldDescription(T value) {
    if (value == null) {
        return "[absent]";
    }
    return value.toString();
}
// Appends "name old -> new" to the builder when the extracted field differs between
// the two specs; a null oldNode means there was no previous spec.
private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    final T before = (oldNode == null) ? null : getter.apply(oldNode);
    final T after = getter.apply(newNode);
    if (Objects.equals(before, after)) return; // unchanged — nothing to report
    if (builder.length() > 0) {
        builder.append(", ");
    }
    builder.append(name).append(" ").append(fieldDescription(before)).append(" -> ").append(fieldDescription(after));
}
// Cancels the daily filebeat-restart task, if one is currently scheduled.
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
// Collects container stats from docker, derives CPU/memory/disk/network metrics,
// and pushes them into the container's metrics agent. No-op unless we have a node
// spec and the container may be running.
// NOTE(review): the raw Map casts assume the docker stats JSON layout
// (cpu_stats.cpu_usage.percpu_usage, memory_stats.stats.cache, ...) — confirm
// against the docker API version in use.
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
final NodeSpec node = lastNode;
if (node == null || containerState != UNKNOWN) return;
Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
.add("state", node.getState().toString());
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.getAllowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
// Scale CPU usage so 100% means "using all of this node's allocated share".
final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
// Page cache is reclaimable, so subtract it from "used" memory.
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
// One metric set per network interface, tagged with the interface name.
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
}
// Serializes the metrics as secret-agent reports and injects them into the container's
// metrics proxy via vespa-rpc-invoke (5 second timeout).
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
String wrappedMetrics = "s:" + params.toString();
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
}
}
// Returns the container from docker, caching definite absence so subsequent calls
// can skip the docker query until the agent starts a container again.
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> container = dockerOperations.getContainer(context);
if (! container.isPresent()) containerState = ABSENT;
return container;
}
// A non-null imageBeingDownloaded means an async docker pull is still in flight.
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
// Returns the number of unhandled exceptions seen since the last call, and resets it.
// NOTE(review): the read-then-reset is not atomic with respect to the tick thread's
// increments — confirm occasional lost counts are acceptable.
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
/**
 * Tracks CPU usage for the managed container by sampling cumulative counters and
 * keeping the deltas between the two most recent samples.
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample.
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;
    // Deltas between the two most recent samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        if (this.totalSystemUsage == 0) {
            // First sample: no baseline yet, so report no system delta.
            deltaSystemUsage = 0;
        } else {
            deltaSystemUsage = totalSystemUsage - this.totalSystemUsage;
        }
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     * NaN until two samples have been taken.
     */
    double getCpuUsageRatio() {
        if (deltaSystemUsage == 0) return Double.NaN;
        return (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Same as getCpuUsageRatio(), but counting only kernel-mode CPU time. */
    double getCpuKernelUsageRatio() {
        if (deltaSystemUsage == 0) return Double.NaN;
        return (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
// Asks the Orchestrator for permission to suspend this node before disruptive operations.
private void orchestratorSuspendNode() {
context.log(logger, "Ask Orchestrator for permission to suspend node");
orchestrator.suspend(context.hostname().value());
}
// Default ContainerData for container creation; protected so subclasses can supply a
// real implementation — this default rejects any attempt to add files.
protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
return (pathInContainer, data) -> {
throw new UnsupportedOperationException("addFile not implemented");
};
}
} |
That's right. | void converge() {
// Fetch the latest node spec; the node may legitimately be gone right after deallocation.
final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
return;
}
final NodeSpec node = optionalNode.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
expectNodeNotInNodeRepo = false;
Optional<Container> container = getContainer();
if (!node.equals(lastNode)) {
logChangesToNodeSpec(lastNode, node);
// Sync local generation counters from the node repo's view.
if (currentRebootGeneration < node.getCurrentRebootGeneration())
currentRebootGeneration = node.getCurrentRebootGeneration();
// NOTE(review): this only syncs when the *presence* of the restart generation
// differs, not when both are present with different values — confirm intended.
if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent())
currentRestartGeneration = node.getCurrentRestartGeneration();
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context, node);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
// Inactive-like states: make sure no container runs, then report attributes.
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, node, container);
// Reclaim disk space when utilization reaches 80%.
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node);
if (isDownloadingImage()) {
// Come back on the next tick once the image pull has finished.
context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(node, container);
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
if (! container.isPresent()) {
// STARTING marks an in-flight start attempt; back to UNKNOWN once it completed.
containerState = STARTING;
startContainer(node);
containerState = UNKNOWN;
aclMaintainer.ifPresent(AclMaintainer::converge);
}
startServicesIfNeeded();
resumeNodeIfNeeded(node);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
updateNodeRepoWithCurrentAttributes(node);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case provisioned:
nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
break;
case dirty:
// Wipe the node: remove container, clear credentials, archive storage, mark ready.
removeContainerIfNeededUpdateContainerState(node, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(node);
nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
expectNodeNotInNodeRepo = true;
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
} | if (currentRebootGeneration < node.getCurrentRebootGeneration()) | void converge() {
// Fetch the latest node spec; the node may legitimately be gone right after deallocation.
final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
return;
}
final NodeSpec node = optionalNode.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
expectNodeNotInNodeRepo = false;
Optional<Container> container = getContainer();
if (!node.equals(lastNode)) {
logChangesToNodeSpec(lastNode, node);
// Initialize local generation counters from the node repo on the very first spec only;
// afterwards this agent is the source of truth for what it has acted on.
if (lastNode == null) {
currentRebootGeneration = node.getCurrentRebootGeneration();
currentRestartGeneration = node.getCurrentRestartGeneration();
}
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context, node);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
// Inactive-like states: make sure no container runs, then report attributes.
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, node, container);
// Reclaim disk space when utilization reaches 80%.
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node);
if (isDownloadingImage()) {
// Come back on the next tick once the image pull has finished.
context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(node, container);
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
if (! container.isPresent()) {
// STARTING marks an in-flight start attempt; back to UNKNOWN once it completed.
containerState = STARTING;
startContainer(node);
containerState = UNKNOWN;
aclMaintainer.ifPresent(AclMaintainer::converge);
}
startServicesIfNeeded();
resumeNodeIfNeeded(node);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
updateNodeRepoWithCurrentAttributes(node);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case provisioned:
nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
break;
case dirty:
// Wipe the node: remove container, clear credentials, archive storage, mark ready.
removeContainerIfNeededUpdateContainerState(node, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(node);
nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
expectNodeNotInNodeRepo = true;
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
// Guards the frozen/work flags and the tick thread's wait/notify handshake.
private final Object monitor = new Object();
private final AtomicBoolean terminated = new AtomicBoolean(false);
// Frozen-state handshake: wantFrozen is the external wish, isFrozen the applied state.
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
// Set after wiping a dirty node, so a missing node-repo entry is not an error.
private boolean expectNodeNotInNodeRepo = false;
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
// Injected collaborators.
private final NodeAgentContext context;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private int numberOfUnhandledException = 0;
// Image currently being pulled asynchronously, or null when no pull is in flight.
private DockerImage imageBeingDownloaded = null;
private Instant lastConverge;
// Generations this agent has acted on (vs the wanted generations in the node spec).
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
// Single-threaded daemon scheduler used for the daily filebeat restart.
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
// See the javadoc above for the semantics of each constant.
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
// Last node spec fetched from the node repository; null until the first converge.
private NodeSpec lastNode = null;
// CPU usage sampler; replaced with a fresh one whenever the container is (re)started.
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Wires up the agent with its collaborators. The converge loop thread is created
 * here but is not started until {@code start()} is called.
 */
public NodeAgentImpl(
final NodeAgentContext context,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final Clock clock,
final Duration timeBetweenEachConverge,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.context = context;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.clock = clock;
this.timeBetweenEachConverge = timeBetweenEachConverge;
this.lastConverge = clock.instant();
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
// The tick loop runs until stop() flips the terminated flag; any escaping throwable
// is counted and logged rather than killing the JVM.
this.loopThread = new Thread(() -> {
try {
while (!terminated.get()) tick();
} catch (Throwable t) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
}
});
this.loopThread.setName("tick-" + context.hostname());
// Restarts a named service inside the container by running "service <name> restart" as root.
this.serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
// Requests the agent to freeze/unfreeze; wakes the tick thread when the wish changes.
// Returns whether the agent has already reached the requested frozen state.
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
// Starts the converge loop thread created in the constructor.
@Override
public void start() {
context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
loopThread.start();
}
// Shuts down the agent: stops the filebeat scheduler, terminates the tick loop,
// and blocks until both are actually finished. May only be called once.
@Override
public void stop() {
filebeatRestarter.shutdown();
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
// Retry the join until both the tick thread and the scheduler are down.
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
// NOTE(review): the interrupt is swallowed and the wait retried; the thread's
// interrupt status is not restored — confirm this is intentional.
context.log(logger, LogLevel.ERROR,
"Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
}
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
context.log(logger, "Stopped");
}
// Starts node services inside the container unless they were already started.
// Idempotent: only the first call after services were stopped does anything.
void startServicesIfNeeded() {
    if (hasStartedServices) return;
    context.log(logger, "Starting services");
    dockerOperations.startServices(context);
    hasStartedServices = true;
}
// Runs the optional node "resume" command once after a (re)start, and lazily installs
// the daily filebeat-restart task the first time around.
void resumeNodeIfNeeded(NodeSpec node) {
if (!hasResumedNode) {
if (!currentFilebeatRestarter.isPresent()) {
storageMaintainer.writeMetricsConfig(context, node);
// Restart filebeat once a day to pick up rotated configuration/logs.
currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
}
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
// Builds the set of node attributes that differ between the node repo's view and this
// agent's local view, and publishes them if anything changed.
// NOTE(review): the other copy of this method in this file additionally guards the
// restart-generation block with node.getWantedRestartGeneration().isPresent() —
// confirm which variant is intended.
private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
if (!Objects.equals(node.getCurrentRestartGeneration(), currentRestartGeneration)) {
currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
if (!Objects.equals(node.getCurrentRebootGeneration(), currentRebootGeneration)) {
currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
// Only report the wanted docker image as current while the container may be running (UNKNOWN).
Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
}
publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
}
// Pushes the new attributes to the node repository, but only when they differ
// from what the repository already has.
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    if (currentAttributes.equals(newAttributes)) return; // nothing changed, skip the call
    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
            currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
// Creates and starts the container, then resets the per-container bookkeeping
// (fresh CPU sampler, services considered started, resume still pending).
private void startContainer(NodeSpec node) {
ContainerData containerData = createContainerData(context, node);
dockerOperations.createContainer(context, node, containerData);
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
// Removes the container if required; if it survives, restarts its services when the
// wanted restart generation has been bumped. Returns the container if it still exists.
private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(node, container))
.map(container -> {
shouldRestartServices(node).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(node, container);
// Record that we have satisfied the wanted restart generation.
currentRestartGeneration = node.getWantedRestartGeneration();
});
return container;
});
}
/**
 * Returns a human-readable reason to restart services if the wanted restart
 * generation is ahead of the one this agent has acted on, otherwise empty.
 *
 * Fix: the original called {@code currentRestartGeneration.get()} guarded only by
 * the presence of the *wanted* generation; if the current generation was absent it
 * threw NoSuchElementException. An absent current generation is now treated as
 * "behind" and triggers a restart instead of throwing.
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
    long wanted = node.getWantedRestartGeneration().get();
    if (!currentRestartGeneration.isPresent() || currentRestartGeneration.get() < wanted) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.orElse(0L) + " -> " + wanted);
    }
    return Optional.empty();
}
// Restarts Vespa inside a running container of an active node, after asking the
// Orchestrator for permission to suspend.
private void restartServices(NodeSpec node, Container existingContainer) {
if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
context.log(logger, "Restarting services");
orchestratorSuspendNode();
dockerOperations.restartVespa(context);
}
}
// Stops services inside the container; a no-op when the container is known absent.
// If docker reports the container gone mid-call, just remember that it is absent.
@Override
public void stopServices() {
context.log(logger, "Stopping services")
;
if (containerState == ABSENT) return;
try {
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
// Suspends services on the node; best-effort — failures other than a missing
// container are logged and swallowed. No-op when the container is known absent.
@Override
public void suspend() {
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
// Container disappeared underneath us — record its absence.
containerState = ABSENT;
} catch (RuntimeException e) {
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
// Decides whether the existing container must be removed (and recreated later).
// Returns a human-readable removal reason, or empty to keep the container.
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final Node.State nodeState = node.getState();
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
// Resource allocation changes (cpu/memory) require recreating the container.
ContainerResources wantedContainerResources = ContainerResources.from(
node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
if (!wantedContainerResources.equals(existingContainer.resources)) {
return Optional.of("Container should be running with different resource allocation, wanted: " +
wantedContainerResources + ", actual: " + existingContainer.resources);
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
// STARTING here means a previous start attempt never completed.
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
// Removes the container when shouldRemoveContainer() says so: suspends (orchestrator +
// services) if running, handles core dumps, deletes the container and records the new
// reboot generation. Returns empty when removed, otherwise the still-existing container.
private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
if (removeReason.isPresent()) {
context.log(logger, "Will remove container: " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (node.getState() == Node.State.active) {
orchestratorSuspendNode();
}
try {
// A dirty node is being torn down anyway; skip the graceful suspend.
if (node.getState() != Node.State.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
// Removal must proceed even if graceful shutdown fails.
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
stopFilebeatSchedulerIfNeeded();
storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
currentRebootGeneration = node.getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
// Kicks off an async docker image pull when the wanted image differs from the current one.
// NOTE(review): getWantedDockerImage().get() is unguarded — if the wanted image were absent
// while a current image is set, this would throw; confirm callers guarantee presence here.
private void scheduleDownLoadIfNeeded(NodeSpec node) {
if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
imageBeingDownloaded = node.getWantedDockerImage().get();
} else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
imageBeingDownloaded = null;
}
}
// Wakes up the tick() loop early by setting workToDoNow under the monitor lock.
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
monitor.notifyAll();
}
}
}
// One iteration of the agent loop: sleep until the converge interval elapses (or work is
// signaled), publish the wanted frozen state, then run converge() unless frozen.
// Exceptions are classified: orchestrator denials are logged quietly, a vanished container
// resets containerState, anything else bumps the unhandled-exception counter.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = timeBetweenEachConverge
.minus(Duration.between(lastConverge, clock.instant()))
.toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
// Copy out under the lock so converge() runs without holding the monitor.
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
// Expected when the orchestrator denies suspension; not an error.
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Exception e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
}
}
}
/** Logs a diff of interesting fields between the previously seen and the current node spec. */
private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
    final StringBuilder changes = new StringBuilder();
    appendIfDifferent(changes, "state", lastNode, node, NodeSpec::getState);
    if (changes.length() != 0) {
        context.log(logger, LogLevel.INFO, "Changes to node: " + changes.toString());
    }
}
/** Renders a field value for logging; a null value is shown as "[absent]". */
private static <T> String fieldDescription(T value) {
    if (value == null) {
        return "[absent]";
    }
    return value.toString();
}
/**
 * Appends "name old -> new" (comma-separated with earlier entries) to the builder
 * when the value extracted by getter differs between the two node specs.
 * A null oldNode is treated as an absent old value.
 */
private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    final T previous = (oldNode == null) ? null : getter.apply(oldNode);
    final T current = getter.apply(newNode);
    if (Objects.equals(previous, current)) return;

    if (builder.length() > 0) builder.append(", ");
    builder.append(name)
           .append(" ")
           .append(fieldDescription(previous))
           .append(" -> ")
           .append(fieldDescription(current));
}
/** Cancels the periodic filebeat restarter task, if one is currently scheduled. */
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> {
        restarter.cancel(true);
        currentFilebeatRestarter = Optional.empty();
    });
}
// Samples docker container stats and pushes derived cpu/memory/disk/network metrics into
// the container's secret-agent endpoint. No-op when there is no node spec or the container
// is known-absent/starting. The unchecked casts navigate docker's untyped stats maps.
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
final NodeSpec node = lastNode;
if (node == null || containerState != UNKNOWN) return;
Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
.add("state", node.getState().toString());
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.getAllowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
// Shapes below ("cpu_usage", "percpu_usage", etc.) follow the docker stats API payload.
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
// Ratios are reported relative to the cores allocated to this node, not the whole host.
// NOTE(review): assumes getMinCpuCores() returns a floating-point type so this division
// is not integer division — confirm against NodeSpec.
final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
// Page cache is subtracted: it is reclaimable and not "real" usage.
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
// One metrics entry per network interface, tagged with the interface name.
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
}
// Serializes the metrics as a secret-agent report and injects them into the container
// via vespa-rpc-invoke. Failures are logged and swallowed; metrics are best-effort.
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
// "s:" prefix marks the payload as a string argument for the rpc call.
String wrappedMetrics = "s:" + params.toString();
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
}
}
// Fetches the container from docker, caching an ABSENT state when it is known gone
// (a container never reappears without this agent starting it).
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> container = dockerOperations.getContainer(context);
if (! container.isPresent()) containerState = ABSENT;
return container;
}
// True while an async docker image pull scheduled by this agent is still in flight.
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
/** Returns the number of unhandled exceptions seen since the last call, resetting the counter. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
// Tracks cumulative cpu counters (from docker stats) and exposes usage ratios computed
// over the interval between the last two updateCpuDeltas() calls.
class CpuUsageReporter {
// Cumulative counters as last reported by docker.
private long containerKernelUsage = 0;
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
// Per-interval deltas, computed by updateCpuDeltas().
private long deltaContainerKernelUsage;
private long deltaContainerUsage;
private long deltaSystemUsage;
// Folds a new sample into the deltas. The first sample yields deltaSystemUsage == 0,
// which makes the ratio getters return NaN rather than a bogus value.
private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
this.totalSystemUsage = totalSystemUsage;
this.totalContainerUsage = totalContainerUsage;
this.containerKernelUsage = containerKernelUsage;
}
/**
* Returns the CPU usage ratio for the docker container that this NodeAgent is managing
* in the time between the last two times updateCpuDeltas() was called. This is calculated
* by dividing the CPU time used by the container with the CPU time used by the entire system.
*/
double getCpuUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
}
// Same as getCpuUsageRatio(), but counting only kernel-mode cpu time.
double getCpuKernelUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
}
}
// Asks the Orchestrator for permission to suspend this node; throws if denied.
private void orchestratorSuspendNode() {
context.log(logger, "Ask Orchestrator for permission to suspend node");
orchestrator.suspend(context.hostname().value());
}
// Hook for subclasses to inject files into the container before start; the default
// implementation rejects any attempt to add files.
protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
return (pathInContainer, data) -> {
throw new UnsupportedOperationException("addFile not implemented");
};
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
// Guards the frozen/workToDoNow flags and the tick() sleep/wake handshake.
private final Object monitor = new Object();
private final AtomicBoolean terminated = new AtomicBoolean(false);
// Frozen handshake: wantFrozen is the requested state, isFrozen the applied state.
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContext context;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private int numberOfUnhandledException = 0;
// Non-null while an async docker image pull scheduled by this agent is in flight.
private DockerImage imageBeingDownloaded = null;
private Instant lastConverge;
// Generations last applied by this agent, reported back to the node repo.
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
// Last node spec fetched from the node repo; used for change logging and metrics.
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
// Wires the agent's collaborators, prepares (but does not start) the tick loop thread,
// and builds the closure used to restart individual services inside the container.
public NodeAgentImpl(
final NodeAgentContext context,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final Clock clock,
final Duration timeBetweenEachConverge,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.context = context;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.clock = clock;
this.timeBetweenEachConverge = timeBetweenEachConverge;
this.lastConverge = clock.instant();
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
// The loop thread ticks until stop() flips `terminated`; any throwable escaping tick()
// would otherwise kill the thread silently, so it is caught and counted here.
this.loopThread = new Thread(() -> {
try {
while (!terminated.get()) tick();
} catch (Throwable t) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
}
});
this.loopThread.setName("tick-" + context.hostname());
// Restarts a named service inside the container as root; failures are logged, not thrown.
this.serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
// Requests a new frozen state and wakes the tick loop so it applies promptly.
// Returns whether the *applied* state already matches the request (freezing is async).
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
// Starts the background tick loop for this node.
@Override
public void start() {
context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
loopThread.start();
}
// Shuts the agent down: stops the filebeat restarter, flags termination (exactly once),
// wakes the loop, and blocks until both the loop thread and the scheduler have exited.
@Override
public void stop() {
filebeatRestarter.shutdown();
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
// Loop until fully terminated; an interrupt must not abort the shutdown wait.
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
context.log(logger, LogLevel.ERROR,
"Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
}
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
context.log(logger, "Stopped");
}
// Starts services inside the container unless they are already believed to be running.
void startServicesIfNeeded() {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
// Resumes the node (once per suspend cycle): writes metrics config and schedules a daily
// filebeat restart on first resume, then runs the optional resume command in the container.
void resumeNodeIfNeeded(NodeSpec node) {
if (!hasResumedNode) {
if (!currentFilebeatRestarter.isPresent()) {
storageMaintainer.writeMetricsConfig(context, node);
currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
}
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
// Builds the set of node attributes (restart/reboot generations, docker image) whose
// locally-applied values differ from what the node repo currently has, and publishes
// them when non-empty.
private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
if (node.getWantedRestartGeneration().isPresent() &&
!Objects.equals(node.getCurrentRestartGeneration(), currentRestartGeneration)) {
currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
if (!Objects.equals(node.getCurrentRebootGeneration(), currentRebootGeneration)) {
currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
// The image is only reported as current while the container state is UNKNOWN
// (i.e. not known-absent and not stuck starting).
Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
}
publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
}
/** Pushes newAttributes to the node repository, but only when they differ from currentAttributes. */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    if (currentAttributes.equals(newAttributes)) return;

    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
            currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
// Creates and starts the docker container for this node, then resets per-container
// bookkeeping (CPU metrics baseline, service/resume flags).
private void startContainer(NodeSpec node) {
ContainerData containerData = createContainerData(context, node);
dockerOperations.createContainer(context, node, containerData);
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true; // Done indirectly by the container itself
hasResumedNode = false;
// NOTE(review): containerState is logged but never assigned here — presumably the
// caller sets it (e.g. to STARTING) before invoking this method; confirm.
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
// If a container exists: remove it when required (returning empty), otherwise restart
// its services when the wanted restart generation has been bumped, recording the new
// generation. Returns the surviving container, or empty if absent/removed.
private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(node, container))
.map(container -> {
shouldRestartServices(node).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(node, container);
currentRestartGeneration = node.getWantedRestartGeneration();
});
return container;
});
}
/**
 * Returns a human-readable reason to restart services, or empty if no restart is needed.
 * A restart is needed when the node's wanted restart generation is ahead of the restart
 * generation this agent has last observed.
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();

    // Defensive: without an observed baseline generation there is nothing to compare
    // against. The previous code called currentRestartGeneration.get() unconditionally,
    // which throws NoSuchElementException when the Optional is empty.
    // NOTE(review): confirm converge() always seeds currentRestartGeneration from the
    // node repo before this method can run.
    if (!currentRestartGeneration.isPresent()) return Optional.empty();

    if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
    }
    return Optional.empty();
}
// Restarts Vespa services inside a running container of an active node, after asking
// the Orchestrator for permission to suspend the node.
private void restartServices(NodeSpec node, Container existingContainer) {
if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
context.log(logger, "Restarting services");
// Since we are restarting, the services aren't up, so no need to suspend them gracefully first
orchestratorSuspendNode();
dockerOperations.restartVespa(context);
}
}
// Stops all services in the container. A ContainerNotFoundException means the container
// disappeared underneath us, so the cached state is downgraded to ABSENT instead of failing.
@Override
public void stopServices() {
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
// Suspends node services (best effort): a missing container downgrades state to ABSENT,
// any other runtime failure is logged and swallowed so the agent loop keeps running.
@Override
public void suspend() {
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
// It's bad to continue as-if nothing happened, but on the other hand if
// the failure is transient the node may recover on the next converge.
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
// Decides whether the existing container must be removed (and recreated later).
// Returns a human-readable removal reason, or empty to keep the container.
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final Node.State nodeState = node.getState();
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
// Resource allocation changes (cpu/memory) require recreating the container.
ContainerResources wantedContainerResources = ContainerResources.from(
node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
if (!wantedContainerResources.equals(existingContainer.resources)) {
return Optional.of("Container should be running with different resource allocation, wanted: " +
wantedContainerResources + ", actual: " + existingContainer.resources);
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
// STARTING here means a previous start attempt never completed.
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
// Removes the container when shouldRemoveContainer() says so: suspends (orchestrator +
// services) if running, handles core dumps, deletes the container and records the new
// reboot generation. Returns empty when removed, otherwise the still-existing container.
private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
if (removeReason.isPresent()) {
context.log(logger, "Will remove container: " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (node.getState() == Node.State.active) {
orchestratorSuspendNode();
}
try {
// A dirty node is being torn down anyway; skip the graceful suspend.
if (node.getState() != Node.State.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
// Removal must proceed even if graceful shutdown fails.
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
stopFilebeatSchedulerIfNeeded();
storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
currentRebootGeneration = node.getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
// Kicks off an async docker image pull when the wanted image differs from the current one.
// NOTE(review): getWantedDockerImage().get() is unguarded — if the wanted image were absent
// while a current image is set, this would throw; confirm callers guarantee presence here.
private void scheduleDownLoadIfNeeded(NodeSpec node) {
if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
imageBeingDownloaded = node.getWantedDockerImage().get();
} else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
imageBeingDownloaded = null;
}
}
// Wakes up the tick() loop early by setting workToDoNow under the monitor lock.
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
monitor.notifyAll();
}
}
}
// One iteration of the agent loop: sleep until the converge interval elapses (or work is
// signaled), publish the wanted frozen state, then run converge() unless frozen.
// Exceptions are classified: orchestrator/convergence failures are logged quietly, a
// vanished container resets containerState, anything else bumps the unhandled counter.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = timeBetweenEachConverge
.minus(Duration.between(lastConverge, clock.instant()))
.toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
// Copy out under the lock so converge() runs without holding the monitor.
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException | ConvergenceException e) {
// Expected during normal operation (e.g. orchestrator denies suspension); not an error.
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Exception e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
}
}
}
/** Logs a diff of interesting fields between the previously seen and the current node spec. */
private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
    final StringBuilder changes = new StringBuilder();
    appendIfDifferent(changes, "state", lastNode, node, NodeSpec::getState);
    if (changes.length() != 0) {
        context.log(logger, LogLevel.INFO, "Changes to node: " + changes.toString());
    }
}
/** Renders a field value for logging; a null value is shown as "[absent]". */
private static <T> String fieldDescription(T value) {
    if (value == null) {
        return "[absent]";
    }
    return value.toString();
}
/**
 * Appends "name old -> new" (comma-separated with earlier entries) to the builder
 * when the value extracted by getter differs between the two node specs.
 * A null oldNode is treated as an absent old value.
 */
private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    final T previous = (oldNode == null) ? null : getter.apply(oldNode);
    final T current = getter.apply(newNode);
    if (Objects.equals(previous, current)) return;

    if (builder.length() > 0) builder.append(", ");
    builder.append(name)
           .append(" ")
           .append(fieldDescription(previous))
           .append(" -> ")
           .append(fieldDescription(current));
}
/** Cancels the periodic filebeat restarter task, if one is currently scheduled. */
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> {
        restarter.cancel(true);
        currentFilebeatRestarter = Optional.empty();
    });
}
// Samples docker container stats and pushes derived cpu/memory/disk/network metrics into
// the container's secret-agent endpoint. No-op when there is no node spec or the container
// is known-absent/starting. The unchecked casts navigate docker's untyped stats maps.
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
final NodeSpec node = lastNode;
if (node == null || containerState != UNKNOWN) return;
Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
.add("state", node.getState().toString());
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.getAllowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
// Shapes below ("cpu_usage", "percpu_usage", etc.) follow the docker stats API payload.
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
// Ratios are reported relative to the cores allocated to this node, not the whole host.
// NOTE(review): assumes getMinCpuCores() returns a floating-point type so this division
// is not integer division — confirm against NodeSpec.
final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
// Page cache is subtracted: it is reclaimable and not "real" usage.
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
// One metrics entry per network interface, tagged with the interface name.
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
}
// Serializes the metrics as a secret-agent report and injects them into the container
// via vespa-rpc-invoke. Failures are logged and swallowed; metrics are best-effort.
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
// "s:" prefix marks the payload as a string argument for the rpc call.
String wrappedMetrics = "s:" + params.toString();
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
}
}
// Fetches the container from docker, caching an ABSENT state when it is known gone
// (a container never reappears without this agent starting it).
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> container = dockerOperations.getContainer(context);
if (! container.isPresent()) containerState = ABSENT;
return container;
}
// True while an async docker image pull scheduled by this agent is still in flight.
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
/** Returns the number of unhandled exceptions seen since the last call, resetting the counter. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
// Tracks cumulative cpu counters (from docker stats) and exposes usage ratios computed
// over the interval between the last two updateCpuDeltas() calls.
class CpuUsageReporter {
// Cumulative counters as last reported by docker.
private long containerKernelUsage = 0;
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
// Per-interval deltas, computed by updateCpuDeltas().
private long deltaContainerKernelUsage;
private long deltaContainerUsage;
private long deltaSystemUsage;
// Folds a new sample into the deltas. The first sample yields deltaSystemUsage == 0,
// which makes the ratio getters return NaN rather than a bogus value.
private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
this.totalSystemUsage = totalSystemUsage;
this.totalContainerUsage = totalContainerUsage;
this.containerKernelUsage = containerKernelUsage;
}
/**
* Returns the CPU usage ratio for the docker container that this NodeAgent is managing
* in the time between the last two times updateCpuDeltas() was called. This is calculated
* by dividing the CPU time used by the container with the CPU time used by the entire system.
*/
double getCpuUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
}
// Same as getCpuUsageRatio(), but counting only kernel-mode cpu time.
double getCpuKernelUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
}
}
/** Asks the Orchestrator for permission to suspend this node before disruptive operations. */
private void orchestratorSuspendNode() {
    String hostname = context.hostname().value();
    context.log(logger, "Ask Orchestrator for permission to suspend node");
    orchestrator.suspend(hostname);
}
/**
 * Hook for subclasses to supply data written into the container's filesystem before start.
 * This default implementation rejects every attempt to add a file.
 */
protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
    return (containerPath, fileContent) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} |
How about just testing for !currentRestartGeneration.isPresent()? If currentRestartGeneration.isPresent(), then we don't want to update currentRestartGeneration even if !node.getCurrentRestartGeneration().isPresent(). | void converge() {
// Converges the node towards the wanted state in the node repository.
// BUGFIX: the local reboot/restart generation counters are seeded from the node repo
// only on the very first converge (lastNode == null). The previous presence-based
// condition (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent())
// could overwrite a locally-owned restart generation whenever presence differed from the
// repo's view - e.g. reset it to empty after this agent had already bumped it.
final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
    context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
    return;
}
final NodeSpec node = optionalNode.orElseThrow(() ->
        new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
expectNodeNotInNodeRepo = false;
Optional<Container> container = getContainer();
if (!node.equals(lastNode)) {
    logChangesToNodeSpec(lastNode, node);
    // First converge for this node: seed the locally tracked generations from the repo.
    // On later converges the local values are authoritative (they are advanced when this
    // agent actually reboots/restarts), so never overwrite them from the repo again.
    if (lastNode == null) {
        currentRebootGeneration = node.getCurrentRebootGeneration();
        currentRestartGeneration = node.getCurrentRestartGeneration();
    }
    if (container.map(c -> c.state.isRunning()).orElse(false)) {
        storageMaintainer.writeMetricsConfig(context, node);
    }
    lastNode = node;
}
switch (node.getState()) {
    case ready:
    case reserved:
    case parked:
    case failed:
        // No container should run in these states; report our attributes back.
        removeContainerIfNeededUpdateContainerState(node, container);
        updateNodeRepoWithCurrentAttributes(node);
        break;
    case active:
        storageMaintainer.handleCoreDumpsForContainer(context, node, container);
        // Trigger old-file cleanup when disk utilization reaches 80% of the allocation.
        storageMaintainer.getDiskUsageFor(context)
                .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                .filter(diskUtil -> diskUtil >= 0.8)
                .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
        scheduleDownLoadIfNeeded(node);
        if (isDownloadingImage()) {
            context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
            return;
        }
        container = removeContainerIfNeededUpdateContainerState(node, container);
        athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
        if (! container.isPresent()) {
            containerState = STARTING;
            startContainer(node);
            containerState = UNKNOWN;
            aclMaintainer.ifPresent(AclMaintainer::converge);
        }
        startServicesIfNeeded();
        resumeNodeIfNeeded(node);
        healthChecker.ifPresent(checker -> checker.verifyHealth(context));
        updateNodeRepoWithCurrentAttributes(node);
        context.log(logger, "Call resume against Orchestrator");
        orchestrator.resume(context.hostname().value());
        break;
    case inactive:
        removeContainerIfNeededUpdateContainerState(node, container);
        updateNodeRepoWithCurrentAttributes(node);
        break;
    case provisioned:
        // Provisioned nodes are moved to dirty so they get cleaned before becoming ready.
        nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
        break;
    case dirty:
        removeContainerIfNeededUpdateContainerState(node, container);
        context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
        athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
        storageMaintainer.archiveNodeStorage(context);
        updateNodeRepoWithCurrentAttributes(node);
        nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
        expectNodeNotInNodeRepo = true;
        break;
    default:
        throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
} | if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent()) | void converge() {
// Converges the node towards the wanted state in the node repository.
// Absence from the node repo is expected (and terminal) after we marked the node ready.
final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
return;
}
final NodeSpec node = optionalNode.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
expectNodeNotInNodeRepo = false;
Optional<Container> container = getContainer();
if (!node.equals(lastNode)) {
logChangesToNodeSpec(lastNode, node);
// First converge for this node: seed the locally tracked generations from the node repo.
// On later converges the local values are authoritative (advanced when we actually
// reboot/restart) and must not be overwritten from the repo.
if (lastNode == null) {
currentRebootGeneration = node.getCurrentRebootGeneration();
currentRestartGeneration = node.getCurrentRestartGeneration();
}
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context, node);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
// No container should run in these states; just report our attributes back.
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, node, container);
// Trigger old-file cleanup when disk utilization reaches 80% of the allocation.
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node);
// Wait (return, retry next tick) while the wanted image is still downloading.
if (isDownloadingImage()) {
context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(node, container);
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
// STARTING marks an in-flight start attempt; it only becomes UNKNOWN on success.
if (! container.isPresent()) {
containerState = STARTING;
startContainer(node);
containerState = UNKNOWN;
aclMaintainer.ifPresent(AclMaintainer::converge);
}
startServicesIfNeeded();
resumeNodeIfNeeded(node);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
updateNodeRepoWithCurrentAttributes(node);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case provisioned:
// Provisioned nodes are moved to dirty so they get cleaned before becoming ready.
nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
break;
case dirty:
removeContainerIfNeededUpdateContainerState(node, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(node);
nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
// From here on, disappearing from the node repo is the expected outcome.
expectNodeNotInNodeRepo = true;
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final Object monitor = new Object();
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContext context;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private Instant lastConverge;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final NodeAgentContext context,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final Clock clock,
final Duration timeBetweenEachConverge,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.context = context;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.clock = clock;
this.timeBetweenEachConverge = timeBetweenEachConverge;
this.lastConverge = clock.instant();
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
this.loopThread = new Thread(() -> {
try {
while (!terminated.get()) tick();
} catch (Throwable t) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
}
});
this.loopThread.setName("tick-" + context.hostname());
this.serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
@Override
public void start() {
context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
loopThread.start();
}
@Override
public void stop() {
filebeatRestarter.shutdown();
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
context.log(logger, LogLevel.ERROR,
"Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
}
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
context.log(logger, "Stopped");
}
void startServicesIfNeeded() {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
void resumeNodeIfNeeded(NodeSpec node) {
if (!hasResumedNode) {
if (!currentFilebeatRestarter.isPresent()) {
storageMaintainer.writeMetricsConfig(context, node);
currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
}
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
/**
 * Compares the node repository's view of this node's restart/reboot generations and
 * docker image with the locally tracked values, and publishes the local values to the
 * node repository if any of them differ.
 */
private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
    final NodeAttributes currentNodeAttributes = new NodeAttributes();
    final NodeAttributes newNodeAttributes = new NodeAttributes();
    // A restart generation is only meaningful when one is wanted; without this guard we
    // would publish a restart generation for nodes that never had one wanted.
    if (node.getWantedRestartGeneration().isPresent() &&
            !Objects.equals(node.getCurrentRestartGeneration(), currentRestartGeneration)) {
        currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
        newNodeAttributes.withRestartGeneration(currentRestartGeneration);
    }
    if (!Objects.equals(node.getCurrentRebootGeneration(), currentRebootGeneration)) {
        currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
        newNodeAttributes.withRebootGeneration(currentRebootGeneration);
    }
    // Only report the wanted image as current while the container is in a known (UNKNOWN,
    // i.e. not absent/starting) state.
    Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
    if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
        currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
        newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
    }
    publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
if (!currentAttributes.equals(newAttributes)) {
context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
currentAttributes, newAttributes);
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
}
private void startContainer(NodeSpec node) {
ContainerData containerData = createContainerData(context, node);
dockerOperations.createContainer(context, node, containerData);
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(node, container))
.map(container -> {
shouldRestartServices(node).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(node, container);
currentRestartGeneration = node.getWantedRestartGeneration();
});
return container;
});
}
/**
 * Returns the reason services should be restarted, or empty if no restart is needed.
 * A restart is needed when a restart generation is wanted and the locally tracked
 * generation is either unknown or behind the wanted one.
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    Optional<Long> wanted = node.getWantedRestartGeneration();
    if (!wanted.isPresent()) return Optional.empty();
    // BUGFIX: the original called currentRestartGeneration.get() unconditionally, which
    // throws NoSuchElementException when a restart generation is wanted but the current
    // one is absent. Treat an unknown current generation as being behind the wanted one.
    if (!currentRestartGeneration.isPresent()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + "unknown -> " + wanted.get());
    }
    if (currentRestartGeneration.get() < wanted.get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.get() + " -> " + wanted.get());
    }
    return Optional.empty();
}
private void restartServices(NodeSpec node, Container existingContainer) {
if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
context.log(logger, "Restarting services");
orchestratorSuspendNode();
dockerOperations.restartVespa(context);
}
}
@Override
public void stopServices() {
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
@Override
public void suspend() {
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
// Returns the reason the existing container must be removed (and later recreated),
// or empty if the container may keep running as-is.
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final Node.State nodeState = node.getState();
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
// A changed wanted image requires recreating the container from the new image.
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
// CPU/memory limits can only be changed by recreating the container.
ContainerResources wantedContainerResources = ContainerResources.from(
node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
if (!wantedContainerResources.equals(existingContainer.resources)) {
return Optional.of("Container should be running with different resource allocation, wanted: " +
wantedContainerResources + ", actual: " + existingContainer.resources);
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
// STARTING here means a previous start attempt did not complete; recreate from scratch.
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
if (removeReason.isPresent()) {
context.log(logger, "Will remove container: " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (node.getState() == Node.State.active) {
orchestratorSuspendNode();
}
try {
if (node.getState() != Node.State.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
stopFilebeatSchedulerIfNeeded();
storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
currentRebootGeneration = node.getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
private void scheduleDownLoadIfNeeded(NodeSpec node) {
if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
imageBeingDownloaded = node.getWantedDockerImage().get();
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = timeBetweenEachConverge
.minus(Duration.between(lastConverge, clock.instant()))
.toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException | ConvergenceException e) {
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Exception e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
}
}
}
private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
StringBuilder builder = new StringBuilder();
appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
if (builder.length() > 0) {
context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
}
}
/** Renders a possibly-null field value for log output, mapping null to "[absent]". */
private static <T> String fieldDescription(T value) {
    if (value == null) {
        return "[absent]";
    }
    return value.toString();
}
private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
T oldValue = oldNode == null ? null : getter.apply(oldNode);
T newValue = getter.apply(newNode);
if (!Objects.equals(oldValue, newValue)) {
if (builder.length() > 0) {
builder.append(", ");
}
builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
}
}
/** Cancels the periodic filebeat-restart task, if one is currently scheduled. */
private void stopFilebeatSchedulerIfNeeded() {
    // cancel(true) interrupts the task if it happens to be running right now.
    currentFilebeatRestarter.ifPresent(task -> task.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
final NodeSpec node = lastNode;
if (node == null || containerState != UNKNOWN) return;
Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
.add("state", node.getState().toString());
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.getAllowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
}
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
String wrappedMetrics = "s:" + params.toString();
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
}
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> container = dockerOperations.getContainer(context);
if (! container.isPresent()) containerState = ABSENT;
return container;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
class CpuUsageReporter {
private long containerKernelUsage = 0;
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private long deltaContainerKernelUsage;
private long deltaContainerUsage;
private long deltaSystemUsage;
private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
this.totalSystemUsage = totalSystemUsage;
this.totalContainerUsage = totalContainerUsage;
this.containerKernelUsage = containerKernelUsage;
}
/**
* Returns the CPU usage ratio for the docker container that this NodeAgent is managing
* in the time between the last two times updateCpuDeltas() was called. This is calculated
* by dividing the CPU time used by the container with the CPU time used by the entire system.
*/
double getCpuUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
}
double getCpuKernelUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
}
}
private void orchestratorSuspendNode() {
context.log(logger, "Ask Orchestrator for permission to suspend node");
orchestrator.suspend(context.hostname().value());
}
protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
return (pathInContainer, data) -> {
throw new UnsupportedOperationException("addFile not implemented");
};
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final Object monitor = new Object();
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContext context;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private Instant lastConverge;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final NodeAgentContext context,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final Clock clock,
final Duration timeBetweenEachConverge,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.context = context;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.clock = clock;
this.timeBetweenEachConverge = timeBetweenEachConverge;
this.lastConverge = clock.instant();
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
this.loopThread = new Thread(() -> {
try {
while (!terminated.get()) tick();
} catch (Throwable t) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
}
});
this.loopThread.setName("tick-" + context.hostname());
this.serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
/**
 * Requests the agent to freeze (stop converging) or unfreeze.
 * Signals the tick loop when the wanted state changes.
 *
 * @return whether the agent has already reached the requested frozen state
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        boolean changed = (wantFrozen != frozen);
        if (changed) {
            wantFrozen = frozen;
            String transition = wantFrozen ? "Freezing" : "Unfreezing";
            context.log(logger, LogLevel.DEBUG, transition);
            signalWorkToBeDone();
        }
        return frozen == isFrozen;
    }
}
/** Starts the converge loop thread. May only be called once. */
@Override
public void start() {
    context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
    loopThread.start();
}
/**
 * Stops the agent: shuts down the filebeat restarter, terminates the tick loop and blocks
 * until both have fully finished. Throws if called more than once.
 */
@Override
public void stop() {
    filebeatRestarter.shutdown();
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    // Wake the loop so it observes `terminated` without waiting out the converge interval.
    signalWorkToBeDone();
    do {
        try {
            loopThread.join();
            filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            context.log(logger, LogLevel.ERROR,
                    "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
        }
        // Loop again if an interrupt cut the join/awaitTermination short.
    } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
    context.log(logger, "Stopped");
}
/** Starts services inside the container unless they have already been started. */
void startServicesIfNeeded() {
    if (hasStartedServices) return; // already running, nothing to do
    context.log(logger, "Starting services");
    dockerOperations.startServices(context);
    hasStartedServices = true;
}
/**
 * Runs the optional node "resume" command once per container lifetime, and (first time only)
 * writes the metrics config and schedules a daily filebeat restart.
 */
void resumeNodeIfNeeded(NodeSpec node) {
    if (!hasResumedNode) {
        if (!currentFilebeatRestarter.isPresent()) {
            storageMaintainer.writeMetricsConfig(context, node);
            // Restart filebeat daily; serviceRestarter logs failures rather than throwing.
            currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                    () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
        }
        context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
        dockerOperations.resumeNode(context);
        hasResumedNode = true;
    }
}
/**
 * Publishes restart/reboot generation and docker image back to the node repository,
 * but only the attributes that actually differ from what the repo already has.
 */
private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
    final NodeAttributes currentNodeAttributes = new NodeAttributes();
    final NodeAttributes newNodeAttributes = new NodeAttributes();
    // Only sync restart generation while the node still has a wanted restart generation:
    // if the node loses its allocation, wanted restart generation becomes empty, which would
    // differ from ours and force an update the node repo would reject.
    if (node.getWantedRestartGeneration().isPresent() &&
            !Objects.equals(node.getCurrentRestartGeneration(), currentRestartGeneration)) {
        currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
        newNodeAttributes.withRestartGeneration(currentRestartGeneration);
    }
    if (!Objects.equals(node.getCurrentRebootGeneration(), currentRebootGeneration)) {
        currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
        newNodeAttributes.withRebootGeneration(currentRebootGeneration);
    }
    // Only report the wanted image as current once the container is actually up (UNKNOWN state).
    Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
    if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
        currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
        newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
    }
    publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
}
/** Pushes the new attributes to the node repository, but only if they differ from the current ones. */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    if (currentAttributes.equals(newAttributes)) return;
    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
            currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
/**
 * Creates and starts the container for the given node, resetting per-container state
 * (CPU metrics, resume flag) for the new container lifetime.
 */
private void startContainer(NodeSpec node) {
    ContainerData containerData = createContainerData(context, node);
    dockerOperations.createContainer(context, node, containerData);
    dockerOperations.startContainer(context);
    lastCpuMetric = new CpuUsageReporter(); // CPU deltas are per container lifetime
    hasStartedServices = true; // services are started automatically
    hasResumedNode = false;
    context.log(logger, "Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the existing container if required; otherwise restarts its services when the wanted
 * restart generation has been bumped. Returns the container if it is kept, empty if removed
 * (or if there was none to begin with).
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
    return existingContainer
            .flatMap(container -> removeContainerIfNeeded(node, container))
            .map(container -> {
                shouldRestartServices(node).ifPresent(restartReason -> {
                    context.log(logger, "Will restart services: " + restartReason);
                    restartServices(node, container);
                    // Record that we have satisfied the wanted restart generation.
                    currentRestartGeneration = node.getWantedRestartGeneration();
                });
                return container;
            });
}
/**
 * Returns a human-readable reason to restart services if the node's wanted restart generation
 * is ahead of ours, empty otherwise.
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
    // NOTE(review): assumes currentRestartGeneration is present whenever the wanted generation
    // is present (converge keeps their presence in sync) — .get() would otherwise throw.
    if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
    }
    return Optional.empty();
}
/** Restarts Vespa inside the container, but only for a running container on an active node. */
private void restartServices(NodeSpec node, Container existingContainer) {
    boolean nodeIsActive = node.getState() == Node.State.active;
    if (!existingContainer.state.isRunning() || !nodeIsActive) return;
    context.log(logger, "Restarting services");
    // Get permission from the Orchestrator before taking services down.
    orchestratorSuspendNode();
    dockerOperations.restartVespa(context);
}
/** Stops services inside the container; a vanished container just flips our state to ABSENT. */
@Override
public void stopServices() {
    context.log(logger, "Stopping services");
    if (containerState == ABSENT) return;
    try {
        hasStartedServices = hasResumedNode = false;
        dockerOperations.stopServices(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    }
}
/**
 * Suspends services on the node. Best-effort: a missing container flips state to ABSENT,
 * any other runtime failure is logged and swallowed.
 */
@Override
public void suspend() {
    context.log(logger, "Suspending services on node");
    if (containerState == ABSENT) return;
    try {
        hasResumedNode = false;
        dockerOperations.suspendNode(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    } catch (RuntimeException e) {
        // It's bad to continue as-if nothing happened, but on the other hand if we do not
        // proceed, the node may be stuck — log and carry on.
        context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
    }
}
/**
 * Returns a human-readable reason to remove the existing container, or empty if it may be kept.
 * Checked in priority order: node state, wanted image, container liveness, resource allocation,
 * reboot generation, and finally a failed start attempt.
 */
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
    final Node.State nodeState = node.getState();
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    ContainerResources wantedContainerResources = ContainerResources.from(
            node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
    if (!wantedContainerResources.equals(existingContainer.resources)) {
        return Optional.of("Container should be running with different resource allocation, wanted: " +
                wantedContainerResources + ", actual: " + existingContainer.resources);
    }
    if (currentRebootGeneration < node.getWantedRebootGeneration()) {
        return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, node.getWantedRebootGeneration()));
    }
    // Still in STARTING means the previous start attempt never completed.
    if (containerState == STARTING) return Optional.of("Container failed to start");
    return Optional.empty();
}
/**
 * Removes the container if {@link #shouldRemoveContainer} says so: suspends via the
 * Orchestrator (active nodes), stops services, archives core dumps, then deletes the
 * container. Returns empty if removed, otherwise the container unchanged.
 */
private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
    if (removeReason.isPresent()) {
        context.log(logger, "Will remove container: " + removeReason.get());
        if (existingContainer.state.isRunning()) {
            if (node.getState() == Node.State.active) {
                orchestratorSuspendNode();
            }
            try {
                // dirty nodes are being torn down; no point in suspending them first
                if (node.getState() != Node.State.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }
        stopFilebeatSchedulerIfNeeded();
        // Salvage core dumps before the container (and its filesystem view) goes away.
        storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        // Removing the container is what satisfies a wanted reboot.
        currentRebootGeneration = node.getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Kicks off an async pull of the wanted docker image if it differs from the current one.
 * Tracks the image being downloaded so converge can wait for it.
 * (Name keeps the historical "DownLoad" spelling; private callers in this file use it.)
 */
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
    if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
        imageBeingDownloaded = node.getWantedDockerImage().get();
    } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
        imageBeingDownloaded = null;
    }
}
/** Wakes the tick loop immediately instead of letting it sleep out the converge interval. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) return; // already signaled
        workToDoNow = true;
        context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
        monitor.notifyAll();
    }
}
/**
 * One iteration of the agent loop: waits out the converge interval (or an explicit signal),
 * updates the frozen state, then converges unless frozen. Expected exception types are
 * handled individually; anything else is counted as unhandled and logged.
 */
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        // Sleep until the converge interval has passed since the last converge,
        // unless someone signals work to be done (workToDoNow) in the meantime.
        while (!workToDoNow) {
            long remainder = timeBetweenEachConverge
                    .minus(Duration.between(lastConverge, clock.instant()))
                    .toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder); // may spuriously wake; loop re-checks the deadline
                } catch (InterruptedException e) {
                    context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
            context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
        }
        // Snapshot under the lock; converge() runs outside it.
        isFrozenCopy = isFrozen;
    }
    if (isFrozenCopy) {
        context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException | ConvergenceException e) {
            // Expected, transient conditions — log the message only, no stack trace.
            context.log(logger, e.getMessage());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
            context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
        } catch (DockerException e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
        } catch (Exception e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
        }
    }
}
/** Logs a summary of fields that changed between the previous and the current NodeSpec. */
private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
    StringBuilder diff = new StringBuilder();
    appendIfDifferent(diff, "state", lastNode, node, NodeSpec::getState);
    if (diff.length() == 0) return; // nothing changed, nothing to log
    context.log(logger, LogLevel.INFO, "Changes to node: " + diff.toString());
}
/** Renders a possibly-null field value for log output; null becomes "[absent]". */
private static <T> String fieldDescription(T value) {
    if (value == null) return "[absent]";
    return value.toString();
}
/**
 * Appends "name old -> new" to the builder (comma-separated) if the value extracted by
 * {@code getter} differs between the two specs. A null {@code oldNode} counts as absent.
 */
private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T before = (oldNode == null) ? null : getter.apply(oldNode);
    T after = getter.apply(newNode);
    if (Objects.equals(before, after)) return;
    if (builder.length() > 0) {
        builder.append(", ");
    }
    builder.append(name)
            .append(" ")
            .append(fieldDescription(before))
            .append(" -> ")
            .append(fieldDescription(after));
}
/** Cancels the scheduled daily filebeat restart, if one is active. */
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
/**
 * Samples docker container stats and pushes derived system and network metrics into the
 * container's metrics agent. No-op unless we have seen a node spec and believe the
 * container is running (containerState == UNKNOWN).
 * NOTE(review): the raw-Map casts below assume the docker stats JSON schema
 * (cpu_stats.cpu_usage.*, memory_stats.*, networks.*) — confirm against the docker API version.
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    final NodeSpec node = lastNode;
    if (node == null || containerState != UNKNOWN) return;
    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();
    ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // Raw CPU/memory counters from the docker stats API.
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
    // CPU ratios are computed over the window since the previous sample.
    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
    // Ratio of CPU allocated to this node vs. all cores on the host; used to scale utilization.
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
    // Page cache does not count as "used" memory.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());
    // One metric set per network interface, tagged with the interface name.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });
    pushMetricsToContainer(metrics);
}
/**
 * Ships the given metrics into the container by invoking setExtraMetrics on the metrics
 * agent via vespa-rpc-invoke. Timeouts and serialization failures are logged, not thrown.
 */
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
    try {
        StringBuilder report = new StringBuilder();
        for (DimensionMetrics dimensionMetrics : metrics) {
            report.append(dimensionMetrics.toSecretAgentReport());
        }
        // "s:" prefix marks the payload as a secret-agent report string.
        String wrappedMetrics = "s:" + report.toString();
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
/**
 * Looks up the container from the docker daemon, short-circuiting when we already know it
 * is absent. Updates containerState to ABSENT when the daemon reports no container.
 */
private Optional<Container> getContainer() {
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> container = dockerOperations.getContainer(context);
    if (container.isPresent()) return container;
    containerState = ABSENT;
    return Optional.empty();
}
/** Returns true while an async docker image pull started by this agent is in progress. */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
/** Returns the number of unhandled exceptions seen since the previous call, resetting the counter. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
class CpuUsageReporter {
private long containerKernelUsage = 0;
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private long deltaContainerKernelUsage;
private long deltaContainerUsage;
private long deltaSystemUsage;
private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
this.totalSystemUsage = totalSystemUsage;
this.totalContainerUsage = totalContainerUsage;
this.containerKernelUsage = containerKernelUsage;
}
/**
* Returns the CPU usage ratio for the docker container that this NodeAgent is managing
* in the time between the last two times updateCpuDeltas() was called. This is calculated
* by dividing the CPU time used by the container with the CPU time used by the entire system.
*/
double getCpuUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
}
double getCpuKernelUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
}
}
/** Asks the Orchestrator for permission to suspend this node; throws if permission is denied. */
private void orchestratorSuspendNode() {
    context.log(logger, "Ask Orchestrator for permission to suspend node");
    String hostname = context.hostname().value();
    orchestrator.suspend(hostname);
}
/**
 * Hook for subclasses to provide files to be written into the container before start.
 * The default implementation rejects any attempt to add a file.
 */
protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
    return (pathInContainer, data) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} |
/**
 * One convergence pass: fetches the node spec, reconciles the container and services with
 * the wanted state, and reports attributes back to the node repository.
 */
void converge() {
    final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
    // We just deleted the node from node repo, so this is expected until next prepare
    if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
        context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
        return;
    }
    final NodeSpec node = optionalNode.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
    expectNodeNotInNodeRepo = false;
    Optional<Container> container = getContainer();
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(lastNode, node);
        // Only move the local reboot generation forward, never back.
        if (currentRebootGeneration < node.getCurrentRebootGeneration())
            currentRebootGeneration = node.getCurrentRebootGeneration();
        // Sync restart generation only when its presence flips. The reason it is done this
        // way is: if the node loses allocation, wanted restart generation becomes empty,
        // which is != our restart generation; that would force an update which the node
        // repository's NodePatcher is expected to reject.
        if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent())
            currentRestartGeneration = node.getCurrentRestartGeneration();
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context, node);
        }
        lastNode = node;
    }
    switch (node.getState()) {
        case ready:
        case reserved:
        case parked:
        case failed:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, node, container);
            // Proactively free space when disk utilization reaches 80%.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
            scheduleDownLoadIfNeeded(node);
            if (isDownloadingImage()) {
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(node, container);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            if (! container.isPresent()) {
                // STARTING marks the window in which a failed start leaves state uncertain.
                containerState = STARTING;
                startContainer(node);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            }
            startServicesIfNeeded();
            resumeNodeIfNeeded(node);
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));
            // Report attributes before resuming, so the node repo sees the new image first.
            updateNodeRepoWithCurrentAttributes(node);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(node, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(node);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
return;
}
final NodeSpec node = optionalNode.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
expectNodeNotInNodeRepo = false;
Optional<Container> container = getContainer();
if (!node.equals(lastNode)) {
logChangesToNodeSpec(lastNode, node);
if (lastNode == null) {
currentRebootGeneration = node.getCurrentRebootGeneration();
currentRestartGeneration = node.getCurrentRestartGeneration();
}
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context, node);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, node, container);
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node);
if (isDownloadingImage()) {
context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(node, container);
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
if (! container.isPresent()) {
containerState = STARTING;
startContainer(node);
containerState = UNKNOWN;
aclMaintainer.ifPresent(AclMaintainer::converge);
}
startServicesIfNeeded();
resumeNodeIfNeeded(node);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
updateNodeRepoWithCurrentAttributes(node);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(node, container);
updateNodeRepoWithCurrentAttributes(node);
break;
case provisioned:
nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
break;
case dirty:
removeContainerIfNeededUpdateContainerState(node, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(node);
nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
expectNodeNotInNodeRepo = true;
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final Object monitor = new Object();
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContext context;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private Instant lastConverge;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
    enum ContainerState {
        ABSENT,   // container is definitely not running and will not appear unless we start it
        STARTING, // set just before we attempt to start the container
        UNKNOWN   // must query the docker daemon to learn the actual state
    }
    // Our best-effort knowledge of the container's state; see comment above.
    private ContainerState containerState = UNKNOWN;
    // Last NodeSpec seen from the node repository; null until the first successful converge.
    private NodeSpec lastNode = null;
    // CPU usage deltas are tracked per container lifetime; reset when a container is (re)started.
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
    /**
     * Wires up the agent's collaborators and prepares (but does not start) the tick loop thread.
     * Call {@code start()} to begin converging; the loop runs until {@code stop()} is called.
     */
    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        // Tick loop: runs until terminated is set; any escaped Throwable ends the loop
        // but is counted and logged rather than propagated.
        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
        // Restarts a named service inside the container as root; failures are logged, never thrown.
        this.serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
    /** Starts the converge loop thread. May only be called once. */
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();
    }
    /**
     * Stops the agent: shuts down the filebeat restarter, terminates the tick loop and blocks
     * until both have fully finished. Throws if called more than once.
     */
    @Override
    public void stop() {
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        // Wake the loop so it observes `terminated` without waiting out the converge interval.
        signalWorkToBeDone();
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                context.log(logger, LogLevel.ERROR,
                        "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
            // Loop again if an interrupt cut the join/awaitTermination short.
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        context.log(logger, "Stopped");
    }
void startServicesIfNeeded() {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
    /**
     * Runs the optional node "resume" command once per container lifetime, and (first time only)
     * writes the metrics config and schedules a daily filebeat restart.
     */
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                // Restart filebeat daily; serviceRestarter logs failures rather than throwing.
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }
    /**
     * Publishes restart/reboot generation and docker image back to the node repository,
     * but only the attributes that actually differ from what the repo already has.
     */
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();
        // NOTE(review): this syncs restart generation unconditionally. If the node loses its
        // allocation, wanted restart generation becomes empty, differing from ours and forcing
        // an update the node repository is expected to reject — confirm this is intended.
        if (!Objects.equals(node.getCurrentRestartGeneration(), currentRestartGeneration)) {
            currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(currentRestartGeneration);
        }
        if (!Objects.equals(node.getCurrentRebootGeneration(), currentRebootGeneration)) {
            currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(currentRebootGeneration);
        }
        // Only report the wanted image as current once the container is actually up (UNKNOWN state).
        Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
            currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
            newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
        }
        publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
    }
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
if (!currentAttributes.equals(newAttributes)) {
context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
currentAttributes, newAttributes);
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
}
    /**
     * Creates and starts the container for the given node, resetting per-container state
     * (CPU metrics, resume flag) for the new container lifetime.
     */
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context, node, containerData);
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter(); // CPU deltas are per container lifetime
        hasStartedServices = true; // services are started automatically
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }
    /**
     * Removes the existing container if required; otherwise restarts its services when the wanted
     * restart generation has been bumped. Returns the container if it is kept, empty if removed
     * (or if there was none to begin with).
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                        // Record that we have satisfied the wanted restart generation.
                        currentRestartGeneration = node.getWantedRestartGeneration();
                    });
                    return container;
                });
    }
    /**
     * Returns a human-readable reason to restart services if the node's wanted restart generation
     * is ahead of ours, empty otherwise.
     */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
        // NOTE(review): assumes currentRestartGeneration is present whenever the wanted generation
        // is present (converge initializes it from the node repo) — .get() would otherwise throw.
        if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }
/** Restarts Vespa services in the container, suspending via the Orchestrator first. */
private void restartServices(NodeSpec node, Container existingContainer) {
    // Only restart when something is actually running and the node is in active use.
    if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
        context.log(logger, "Restarting services");
        orchestratorSuspendNode();
        dockerOperations.restartVespa(context);
    }
}
/** Stops all services in the container, if one exists. */
@Override
public void stopServices() {
    context.log(logger, "Stopping services");
    if (containerState == ABSENT) return; // no container, nothing to stop
    try {
        hasStartedServices = hasResumedNode = false;
        dockerOperations.stopServices(context);
    } catch (ContainerNotFoundException e) {
        // Container disappeared underneath us; record that so later calls short-circuit.
        containerState = ABSENT;
    }
}
/** Suspends node services, leaving the container itself running. Best-effort. */
@Override
public void suspend() {
    context.log(logger, "Suspending services on node");
    if (containerState == ABSENT) return; // no container, nothing to suspend
    try {
        hasResumedNode = false;
        dockerOperations.suspendNode(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    } catch (RuntimeException e) {
        // A failed suspend should not abort the surrounding converge.
        context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
    }
}
/**
 * Returns a human-readable reason why the existing container must be removed
 * (and later recreated), or empty if it can be kept.
 */
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
    final Node.State nodeState = node.getState();
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    // Resource allocation can only be changed by recreating the container.
    ContainerResources wantedContainerResources = ContainerResources.from(
            node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
    if (!wantedContainerResources.equals(existingContainer.resources)) {
        return Optional.of("Container should be running with different resource allocation, wanted: " +
                wantedContainerResources + ", actual: " + existingContainer.resources);
    }
    if (currentRebootGeneration < node.getWantedRebootGeneration()) {
        return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, node.getWantedRebootGeneration()));
    }
    // STARTING here means the previous start attempt never completed.
    if (containerState == STARTING) return Optional.of("Container failed to start");
    return Optional.empty();
}
/**
 * Removes the container if {@link #shouldRemoveContainer} says so, after best-effort
 * suspend/stop of its services and core-dump handling.
 *
 * @return empty if the container was removed, otherwise the unchanged container
 */
private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
    if (removeReason.isPresent()) {
        context.log(logger, "Will remove container: " + removeReason.get());
        if (existingContainer.state.isRunning()) {
            if (node.getState() == Node.State.active) {
                orchestratorSuspendNode();
            }
            try {
                if (node.getState() != Node.State.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                // Removal proceeds even if graceful shutdown fails.
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }
        stopFilebeatSchedulerIfNeeded();
        storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        currentRebootGeneration = node.getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Starts an async pull of the wanted Docker image if it differs from the current one,
 * tracking the in-flight image so {@link #isDownloadingImage()} can report it.
 */
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    Optional<DockerImage> wantedDockerImage = node.getWantedDockerImage();
    if (node.getCurrentDockerImage().equals(wantedDockerImage)) return;
    // Bug fix: if the wanted image is absent while a current image is set, the images differ
    // and the original code called wantedDockerImage.get(), throwing NoSuchElementException.
    if (!wantedDockerImage.isPresent()) return;
    if (dockerOperations.pullImageAsyncIfNeeded(wantedDockerImage.get())) {
        imageBeingDownloaded = wantedDockerImage.get();
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null; // pull finished or no longer needed
    }
}
/** Wakes up the converge loop so the next tick runs immediately. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (!workToDoNow) {
            workToDoNow = true;
            context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
            monitor.notifyAll();
        }
    }
}
/**
 * One iteration of the converge loop: waits until the converge interval has elapsed
 * (or {@link #signalWorkToBeDone} fires), reconciles the frozen state, then runs
 * converge() unless frozen. Exceptions are classified, counted and never propagated.
 */
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        // Sleep until the next scheduled converge or an explicit wake-up.
        while (!workToDoNow) {
            long remainder = timeBetweenEachConverge
                    .minus(Duration.between(lastConverge, clock.instant()))
                    .toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder);
                } catch (InterruptedException e) {
                    context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
            context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
        }
        isFrozenCopy = isFrozen; // read under the lock, used outside it
    }
    if (isFrozenCopy) {
        context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException | ConvergenceException e) {
            // Expected transient conditions: log the message only, don't count as unhandled.
            context.log(logger, e.getMessage());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
            context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
        } catch (DockerException e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
        } catch (Exception e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
        }
    }
}
/** Logs a diff of interesting fields between the previous and the new node spec. */
private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
    StringBuilder builder = new StringBuilder();
    appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
    if (builder.length() > 0) {
        context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
    }
}
/** Returns {@code value.toString()}, or "[absent]" when {@code value} is null. */
private static <T> String fieldDescription(T value) {
    return Objects.toString(value, "[absent]");
}
/**
 * Appends "name old -> new" (comma-separated) to {@code builder} if the value produced
 * by {@code getter} differs between the two specs. {@code oldNode} may be null.
 */
private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T oldValue = oldNode == null ? null : getter.apply(oldNode);
    T newValue = getter.apply(newNode);
    if (!Objects.equals(oldValue, newValue)) {
        if (builder.length() > 0) {
            builder.append(", ");
        }
        builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
    }
}
/** Cancels the scheduled filebeat restarter task, if one is active. */
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
/**
 * Collects container stats from Docker, derives CPU/memory/disk/network metrics and
 * pushes them into the container. No-op when there is no known node spec or when the
 * container is known to be absent/starting.
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    final NodeSpec node = lastNode;
    if (node == null || containerState != UNKNOWN) return;
    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();
    ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // Raw cumulative counters from the Docker stats API (untyped maps, hence the casts).
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
    // CPU utilization is reported relative to the cores allocated to this node, not the whole host.
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
    // Page cache is subtracted from usage so reclaimable memory does not count as "used".
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());
    // One additional metrics set per network interface.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });
    pushMetricsToContainer(metrics);
}
/**
 * Serializes the metrics to secret-agent report format and injects them into the
 * container via vespa-rpc-invoke. Best-effort: failures are logged, not thrown.
 */
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
    StringBuilder params = new StringBuilder();
    try {
        for (DimensionMetrics dimensionMetrics : metrics) {
            params.append(dimensionMetrics.toSecretAgentReport());
        }
        String wrappedMetrics = "s:" + params.toString();
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
/**
 * Returns this node's container as reported by the Docker daemon, updating
 * containerState to ABSENT when it is gone.
 */
private Optional<Container> getContainer() {
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> container = dockerOperations.getContainer(context);
    if (! container.isPresent()) containerState = ABSENT;
    return container;
}
/** Returns true while an async image pull scheduled by this agent is in flight. */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
/** Returns the number of unhandled exceptions since the last call, resetting the counter. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int unhandledCount = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandledCount;
}
/**
 * Tracks cumulative CPU usage counters (as reported by Docker stats) and exposes
 * usage ratios computed over the delta between the last two updates.
 */
class CpuUsageReporter {
    // Last absolute counter values seen.
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;
    // Deltas between the last two updates.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;
    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample: no previous system reading, so report a zero delta (ratios become NaN).
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }
    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }
    /** Same as {@link #getCpuUsageRatio()}, but counting only kernel-mode CPU time. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/** Asks the Orchestrator for permission to suspend this node. */
private void orchestratorSuspendNode() {
    context.log(logger, "Ask Orchestrator for permission to suspend node");
    orchestrator.suspend(context.hostname().value());
}
/**
 * Returns the data to write into a newly created container. This default
 * implementation supports no files; subclasses may override.
 */
protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
    return (pathInContainer, data) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
// Guards the converge loop's scheduling state (workToDoNow, isFrozen, wantFrozen).
private final Object monitor = new Object();
private final AtomicBoolean terminated = new AtomicBoolean(false);
// isFrozen is the acknowledged state, wantFrozen the requested one; tick() reconciles them.
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContext context;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private int numberOfUnhandledException = 0;
// Image currently being pulled asynchronously, or null when no pull is in flight.
private DockerImage imageBeingDownloaded = null;
private Instant lastConverge;
// Generations this agent last acted on; compared against wanted ones from the node repo.
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
        Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
    ABSENT,
    STARTING,
    UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Creates a node agent for the node given by {@code context}. The converge loop
 * does not run until {@link #start()} is called.
 */
public NodeAgentImpl(
        final NodeAgentContext context,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final StorageMaintainer storageMaintainer,
        final Clock clock,
        final Duration timeBetweenEachConverge,
        final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
        final Optional<AclMaintainer> aclMaintainer,
        final Optional<HealthChecker> healthChecker) {
    this.context = context;
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.clock = clock;
    this.timeBetweenEachConverge = timeBetweenEachConverge;
    this.lastConverge = clock.instant();
    this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
    this.aclMaintainer = aclMaintainer;
    this.healthChecker = healthChecker;
    // The converge loop thread; runs until stop() flips `terminated`.
    this.loopThread = new Thread(() -> {
        try {
            while (!terminated.get()) tick();
        } catch (Throwable t) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
        }
    });
    this.loopThread.setName("tick-" + context.hostname());
    // Restarts a named service inside the container; failures are logged, never thrown.
    this.serviceRestarter = service -> {
        try {
            ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                    context, "service", service, "restart");
            if (!processResult.isSuccess()) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
            }
        } catch (Exception e) {
            context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
        }
    };
}
/**
 * Requests freezing/unfreezing of this agent.
 *
 * @return whether the agent has already reached the requested frozen state
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        if (wantFrozen != frozen) {
            wantFrozen = frozen;
            context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}
/** Starts the converge loop thread. */
@Override
public void start() {
    context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
    loopThread.start();
}
/**
 * Stops the converge loop and the filebeat restarter, blocking until both have
 * terminated. May only be called once.
 */
@Override
public void stop() {
    filebeatRestarter.shutdown();
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    // Keep retrying the joins until both the loop thread and the scheduler are down.
    do {
        try {
            loopThread.join();
            filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            context.log(logger, LogLevel.ERROR,
                    "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
        }
    } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
    context.log(logger, "Stopped");
}
/** Starts services in the container unless they are already known to be started. */
void startServicesIfNeeded() {
    if (!hasStartedServices) {
        context.log(logger, "Starting services");
        dockerOperations.startServices(context);
        hasStartedServices = true;
    }
}
/**
 * Runs the optional node resume command, the first time after a (re)start or suspend,
 * and schedules the daily filebeat restart if not already scheduled.
 */
void resumeNodeIfNeeded(NodeSpec node) {
    if (!hasResumedNode) {
        if (!currentFilebeatRestarter.isPresent()) {
            storageMaintainer.writeMetricsConfig(context, node);
            currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                    () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
        }
        context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
        dockerOperations.resumeNode(context);
        hasResumedNode = true;
    }
}
/**
 * Computes which node attributes (restart/reboot generation, docker image) have
 * changed locally and publishes them to the node repository if any differ.
 */
private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
    final NodeAttributes currentNodeAttributes = new NodeAttributes();
    final NodeAttributes newNodeAttributes = new NodeAttributes();
    if (node.getWantedRestartGeneration().isPresent() &&
            !Objects.equals(node.getCurrentRestartGeneration(), currentRestartGeneration)) {
        currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
        newNodeAttributes.withRestartGeneration(currentRestartGeneration);
    }
    if (!Objects.equals(node.getCurrentRebootGeneration(), currentRebootGeneration)) {
        currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
        newNodeAttributes.withRebootGeneration(currentRebootGeneration);
    }
    // Only report a docker image while the container may be running (containerState UNKNOWN).
    Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
    if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
        currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
        newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
    }
    publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
}
/**
 * Pushes {@code newAttributes} to the node repository, but only when they differ from
 * {@code currentAttributes} (avoids a repo write on every converge cycle).
 */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    if (!currentAttributes.equals(newAttributes)) {
        context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                currentAttributes, newAttributes);
        nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
    }
}
/**
 * Creates and starts the container for the given node, then resets per-container
 * bookkeeping (CPU metrics baseline, service/resume flags).
 */
private void startContainer(NodeSpec node) {
    ContainerData containerData = createContainerData(context, node);
    dockerOperations.createContainer(context, node, containerData);
    dockerOperations.startContainer(context);
    lastCpuMetric = new CpuUsageReporter(); // fresh baseline: old counters belong to the previous container
    hasStartedServices = true;
    hasResumedNode = false;
    context.log(logger, "Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the existing container if required; otherwise restarts its services when
 * the wanted restart generation has been bumped.
 *
 * @return the (still running) container, or empty if it was removed or absent
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
    return existingContainer
            .flatMap(container -> removeContainerIfNeeded(node, container))
            .map(container -> {
                shouldRestartServices(node).ifPresent(restartReason -> {
                    context.log(logger, "Will restart services: " + restartReason);
                    restartServices(node, container);
                    currentRestartGeneration = node.getWantedRestartGeneration();
                });
                return container;
            });
}
/**
 * Returns a human-readable reason to restart services if the wanted restart generation
 * is ahead of the one this agent last acted on, otherwise empty.
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
    // NOTE(review): assumes currentRestartGeneration is present whenever a wanted generation
    // exists — .get() would throw otherwise; confirm this invariant holds at the call sites.
    if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
    }
    return Optional.empty();
}
/** Restarts Vespa services in the container, suspending via the Orchestrator first. */
private void restartServices(NodeSpec node, Container existingContainer) {
    // Only restart when something is actually running and the node is in active use.
    if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
        context.log(logger, "Restarting services");
        orchestratorSuspendNode();
        dockerOperations.restartVespa(context);
    }
}
/** Stops all services in the container, if one exists. */
@Override
public void stopServices() {
    context.log(logger, "Stopping services");
    if (containerState == ABSENT) return; // no container, nothing to stop
    try {
        hasStartedServices = hasResumedNode = false;
        dockerOperations.stopServices(context);
    } catch (ContainerNotFoundException e) {
        // Container disappeared underneath us; record that so later calls short-circuit.
        containerState = ABSENT;
    }
}
/** Suspends node services, leaving the container itself running. Best-effort. */
@Override
public void suspend() {
    context.log(logger, "Suspending services on node");
    if (containerState == ABSENT) return; // no container, nothing to suspend
    try {
        hasResumedNode = false;
        dockerOperations.suspendNode(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    } catch (RuntimeException e) {
        // A failed suspend should not abort the surrounding converge.
        context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
    }
}
/**
 * Returns a human-readable reason why the existing container must be removed
 * (and later recreated), or empty if it can be kept.
 */
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
    final Node.State nodeState = node.getState();
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    // Resource allocation can only be changed by recreating the container.
    ContainerResources wantedContainerResources = ContainerResources.from(
            node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
    if (!wantedContainerResources.equals(existingContainer.resources)) {
        return Optional.of("Container should be running with different resource allocation, wanted: " +
                wantedContainerResources + ", actual: " + existingContainer.resources);
    }
    if (currentRebootGeneration < node.getWantedRebootGeneration()) {
        return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, node.getWantedRebootGeneration()));
    }
    // STARTING here means the previous start attempt never completed.
    if (containerState == STARTING) return Optional.of("Container failed to start");
    return Optional.empty();
}
/**
 * Removes the container if {@link #shouldRemoveContainer} says so, after best-effort
 * suspend/stop of its services and core-dump handling.
 *
 * @return empty if the container was removed, otherwise the unchanged container
 */
private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
    if (removeReason.isPresent()) {
        context.log(logger, "Will remove container: " + removeReason.get());
        if (existingContainer.state.isRunning()) {
            if (node.getState() == Node.State.active) {
                orchestratorSuspendNode();
            }
            try {
                if (node.getState() != Node.State.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                // Removal proceeds even if graceful shutdown fails.
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }
        stopFilebeatSchedulerIfNeeded();
        storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        currentRebootGeneration = node.getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Starts an async pull of the wanted Docker image if it differs from the current one,
 * tracking the in-flight image so {@link #isDownloadingImage()} can report it.
 */
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    Optional<DockerImage> wantedDockerImage = node.getWantedDockerImage();
    if (node.getCurrentDockerImage().equals(wantedDockerImage)) return;
    // Bug fix: if the wanted image is absent while a current image is set, the images differ
    // and the original code called wantedDockerImage.get(), throwing NoSuchElementException.
    if (!wantedDockerImage.isPresent()) return;
    if (dockerOperations.pullImageAsyncIfNeeded(wantedDockerImage.get())) {
        imageBeingDownloaded = wantedDockerImage.get();
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null; // pull finished or no longer needed
    }
}
/** Wakes up the converge loop so the next tick runs immediately. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (!workToDoNow) {
            workToDoNow = true;
            context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
            monitor.notifyAll();
        }
    }
}
/**
 * One iteration of the converge loop: waits until the converge interval has elapsed
 * (or {@link #signalWorkToBeDone} fires), reconciles the frozen state, then runs
 * converge() unless frozen. Exceptions are classified, counted and never propagated.
 */
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        // Sleep until the next scheduled converge or an explicit wake-up.
        while (!workToDoNow) {
            long remainder = timeBetweenEachConverge
                    .minus(Duration.between(lastConverge, clock.instant()))
                    .toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder);
                } catch (InterruptedException e) {
                    context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
            context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
        }
        isFrozenCopy = isFrozen; // read under the lock, used outside it
    }
    if (isFrozenCopy) {
        context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException | ConvergenceException e) {
            // Expected transient conditions: log the message only, don't count as unhandled.
            context.log(logger, e.getMessage());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
            context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
        } catch (DockerException e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
        } catch (Exception e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
        }
    }
}
/** Logs a diff of interesting fields between the previous and the new node spec. */
private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
    StringBuilder builder = new StringBuilder();
    appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
    if (builder.length() > 0) {
        context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
    }
}
/** Returns {@code value.toString()}, or "[absent]" when {@code value} is null. */
private static <T> String fieldDescription(T value) {
    return Objects.toString(value, "[absent]");
}
/**
 * Appends "name old -> new" (comma-separated) to {@code builder} if the value produced
 * by {@code getter} differs between the two specs. {@code oldNode} may be null.
 */
private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T oldValue = oldNode == null ? null : getter.apply(oldNode);
    T newValue = getter.apply(newNode);
    if (!Objects.equals(oldValue, newValue)) {
        if (builder.length() > 0) {
            builder.append(", ");
        }
        builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
    }
}
/** Cancels the scheduled filebeat restarter task, if one is active. */
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
/**
 * Collects container stats from Docker, derives CPU/memory/disk/network metrics and
 * pushes them into the container. No-op when there is no known node spec or when the
 * container is known to be absent/starting.
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    final NodeSpec node = lastNode;
    if (node == null || containerState != UNKNOWN) return;
    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();
    ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // Raw cumulative counters from the Docker stats API (untyped maps, hence the casts).
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
    // CPU utilization is reported relative to the cores allocated to this node, not the whole host.
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
    // Page cache is subtracted from usage so reclaimable memory does not count as "used".
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());
    // One additional metrics set per network interface.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });
    pushMetricsToContainer(metrics);
}
/**
 * Serializes the metrics to secret-agent report format and injects them into the
 * container via vespa-rpc-invoke. Best-effort: failures are logged, not thrown.
 */
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
    StringBuilder params = new StringBuilder();
    try {
        for (DimensionMetrics dimensionMetrics : metrics) {
            params.append(dimensionMetrics.toSecretAgentReport());
        }
        String wrappedMetrics = "s:" + params.toString();
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
/**
 * Returns this node's container as reported by the Docker daemon, updating
 * containerState to ABSENT when it is gone.
 */
private Optional<Container> getContainer() {
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> container = dockerOperations.getContainer(context);
    if (! container.isPresent()) containerState = ABSENT;
    return container;
}
@Override
public boolean isDownloadingImage() {
    // A non-null imageBeingDownloaded means an asynchronous image pull is in flight.
    return null != imageBeingDownloaded;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    // Hand out the current tally and zero it, so each caller only sees new exceptions.
    int unhandled = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandled;
}
/**
 * Tracks cumulative CPU time counters between samples and derives the
 * container's share of total system CPU time for the last sampling interval.
 */
class CpuUsageReporter {
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long newSystemUsage, long newContainerUsage, long newKernelUsage) {
        // A zero stored system counter means this is the first sample: no interval exists yet,
        // so the system delta is forced to 0 and the ratios below report NaN.
        deltaSystemUsage = totalSystemUsage == 0 ? 0 : newSystemUsage - totalSystemUsage;
        deltaContainerUsage = newContainerUsage - totalContainerUsage;
        deltaContainerKernelUsage = newKernelUsage - containerKernelUsage;

        totalSystemUsage = newSystemUsage;
        totalContainerUsage = newContainerUsage;
        containerKernelUsage = newKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     * NaN until two samples have been taken.
     */
    double getCpuUsageRatio() {
        if (deltaSystemUsage == 0) return Double.NaN;
        return (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Like getCpuUsageRatio(), but counting only kernel-mode CPU time of the container. */
    double getCpuKernelUsageRatio() {
        if (deltaSystemUsage == 0) return Double.NaN;
        return (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
// Asks the Orchestrator for permission to suspend this node; logs the request first.
private void orchestratorSuspendNode() {
context.log(logger, "Ask Orchestrator for permission to suspend node");
orchestrator.suspend(context.hostname().value());
}
// Returns a ContainerData whose addFile always rejects writes; subclasses are
// expected to override this to provide a real implementation.
protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
return (pathInContainer, data) -> {
throw new UnsupportedOperationException("addFile not implemented");
};
}
} |
This doesn't need to be inside the for loop — everything for the application is deleted independent of the `JobType`? | public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
       .filter(id -> ! applicationsToBuild.contains(id))
       .forEach(id -> {
           try {
               // The tester id is a pure function of the application id; compute it once.
               TesterId tester = TesterId.of(id);
               // Per-job-type state must be removed under the job's locks ...
               for (JobType type : jobs(id))
                   locked(id, type, deactivateTester, __ -> {
                       try (Lock ___ = curator.lock(id, type)) {
                           deactivateTester(tester, type);
                           curator.deleteRunData(id, type);
                           logs.delete(id);
                       }
                   });
               // ... while application-wide artifacts are independent of JobType, so they
               // are deleted once per application, outside the per-type loop and locks.
               controller.applications().applicationStore().removeAll(id);
               controller.applications().applicationStore().removeAll(tester);
           }
           catch (TimeoutException e) {
               return; // Failed to get a lock; leave the rest for the next GC pass.
           }
           curator.deleteRunData(id);
       });
} | controller.applications().applicationStore().removeAll(id); | public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
// The tester id is derived from the application id once, up front.
TesterId tester = TesterId.of(id);
// Per-job-type run data and buffered logs are removed under the job's locks.
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
// Application-wide packages are independent of JobType: delete them once, after the loop.
controller.applications().applicationStore().removeAll(id);
controller.applications().applicationStore().removeAll(tester);
}
catch (TimeoutException e) {
// Could not acquire a lock; give up and let the next garbage collection retry.
return;
}
curator.deleteRunData(id);
});
} | class JobController {
// Maximum number of historic runs kept per job; older runs are pruned in finish().
private static final int historyLength = 256;
private final Controller controller;
private final CuratorDb curator;
// Buffers log entries for active runs and moves them to permanent storage on flush.
private final BufferedLogStore logs;
private final TesterCloud cloud;
public JobController(Controller controller, RunDataStore runDataStore, TesterCloud testerCloud) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, runDataStore);
this.cloud = testerCloud;
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
    for (ApplicationId id : applications()) {
        for (JobType type : jobs(id)) {
            // Re-serialising the last run under its lock migrates it to the current format.
            locked(id, type, runs -> curator.readLastRun(id, type).ifPresent(curator::writeLastRun));
        }
    }
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
// -1 means "no threshold": include every entry from the start of the run.
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
// Active runs are read from the buffered store; finished runs from permanent storage.
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log records for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
locked(id, __ -> {
// Entry id 0 lets the log store assign sequence numbers; timestamps are taken per entry.
List<LogEntry> entries = messages.stream()
.map(message -> new LogEntry(0, controller.clock().millis(), LogEntry.typeOf(level), message))
.collect(Collectors.toList());
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log record for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
// Convenience overload delegating to the list variant.
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
// Only poll the tester while the run is ready to end its tests.
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
// Fetch only entries newer than the last one already stored for this run.
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
// Remember the highest entry id so the next poll continues from there.
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
// Only applications whose deployments are built and run internally are managed here.
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
// A job type is known for the application iff it has at least one recorded run.
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
Map<RunId, Run> runs = curator.readHistoricRuns(id, type);
// The last run may not yet have been copied to history; include it without overwriting.
last(id, type).ifPresent(run -> runs.putIfAbsent(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
// A full RunId is unique, so at most one stored run can match; findAny suffices.
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
// Reads directly from curator; the returned run may still be active.
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
    // Only the last run of a job can still be active, so inspecting that one suffices.
    return last(id.application(), id.type())
            .filter(run -> run.id().equals(id) && ! run.hasEnded());
}
/** Returns a list of all active runs. */
public List<Run> active() {
// Only the last run of each job can be active, so it suffices to check those.
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.filter(Optional::isPresent).map(Optional::get)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
// Performed under the run's lock; silently a no-op if the run is no longer active.
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
// Prune history: drop runs more than historyLength behind the one just finished,
// deleting their stored logs before removing them from the (sorted) history map.
long last = id.number();
Iterator<RunId> ids = runs.keySet().iterator();
for (RunId old = ids.next(); old.number() <= last - historyLength; old = ids.next()) {
logs.delete(old);
ids.remove();
}
});
// Move buffered log entries to permanent storage now that the run is complete.
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
    locked(id, Run::aborted);
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, long projectId,
byte[] packageBytes, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally()) {
// First internal submission: copy the packages of currently deployed versions
// from the artifact repository into the application store.
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().putApplicationPackage(application.get().id(), appVersion, content);
});
}
// The new version is keyed by the next build number after the last completed component build.
long run = nextBuild(id);
version.set(ApplicationVersion.from(revision, run));
controller.applications().applicationStore().putApplicationPackage(id,
version.get(),
packageBytes);
controller.applications().applicationStore().putTesterPackage(TesterId.of(id),
version.get(),
testPackageBytes);
// Prune stored packages older than the oldest currently deployed version.
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().pruneApplicationPackages(id, oldestDeployed);
controller.applications().applicationStore().pruneTesterPackages(TesterId.of(id), oldestDeployed);
});
// Mark the application as internally built and store it with updated config from the package.
controller.applications().storeWithUpdatedConfig(application.withBuiltInternally(true), new ApplicationPackage(packageBytes));
notifyOfNewSubmission(id, projectId, revision, run);
});
return version.get();
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
// Run numbers are consecutive per job, starting at 1.
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
// Abort any in-flight runs so they terminate and can be cleaned up.
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deactivates the tester deployment of the given tester id for the given job type's zone. */
public void deactivateTester(TesterId id, JobType type) {
try {
controller.configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NoInstanceException ignored) {
// The tester deployment was already gone, which is exactly the desired end state.
}
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
Optional<URI> testerEndpoint(RunId id) {
ApplicationId tester = id.tester().id();
return controller.applications().getDeploymentEndpoints(new DeploymentId(tester, id.type().zone(controller.system())))
.flatMap(uris -> uris.stream()
// Expect a host containing "<instance>--<application>--<tenant>.".
.filter(uri -> uri.getHost().contains(String.format("%s--%s--%s.",
tester.instance().value(),
tester.application().value(),
tester.tenant().value())))
.findAny());
}
/** Returns the next build number for the application: one above the last completed component build, or 1. */
private long nextBuild(ApplicationId id) {
    long lastCompletedBuild = controller.applications().require(id).deploymentJobs()
                                        .statusOf(JobType.component)
                                        .flatMap(JobStatus::lastCompleted)
                                        .map(JobStatus.JobRun::id)
                                        .orElse(0L);
    return 1 + lastCompletedBuild;
}
// Reports the new submission as a completed "component" job to the deployment trigger,
// so downstream deployment jobs may be triggered for the new version.
private void notifyOfNewSubmission(ApplicationId id, long projectId, SourceRevision revision, long number) {
DeploymentJobs.JobReport report = new DeploymentJobs.JobReport(id,
JobType.component,
projectId,
number,
Optional.of(revision),
Optional.empty());
controller.applications().deploymentTrigger().notifyOfCompletion(report);
}
/** Locks and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
// Write back the (possibly modified) history while still holding the lock.
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
private void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
// No-op if the run has ended or a newer run has replaced it.
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
// Momentarily acquiring each prerequisite's lock proves none of them is currently running.
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} | class JobController {
private static final int historyLength = 256;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
public JobController(Controller controller, RunDataStore runDataStore, TesterCloud testerCloud) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, runDataStore);
this.cloud = testerCloud;
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log records for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
locked(id, __ -> {
List<LogEntry> entries = messages.stream()
.map(message -> new LogEntry(0, controller.clock().millis(), LogEntry.typeOf(level), message))
.collect(Collectors.toList());
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log record for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
Map<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.putIfAbsent(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.filter(Optional::isPresent).map(Optional::get)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
Iterator<RunId> ids = runs.keySet().iterator();
for (RunId old = ids.next(); old.number() <= last - historyLength; old = ids.next()) {
logs.delete(old);
ids.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, long projectId,
byte[] packageBytes, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally()) {
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
}
long run = nextBuild(id);
version.set(ApplicationVersion.from(revision, run));
controller.applications().applicationStore().put(id,
version.get(),
packageBytes);
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id, oldestDeployed);
controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
});
controller.applications().storeWithUpdatedConfig(application.withBuiltInternally(true), new ApplicationPackage(packageBytes));
notifyOfNewSubmission(id, projectId, revision, run);
});
return version.get();
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data, packages and tester deployments for applications which are unknown, or no longer built internally. */
public void deactivateTester(TesterId id, JobType type) {
try {
controller.configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NoInstanceException ignored) {
}
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
Optional<URI> testerEndpoint(RunId id) {
ApplicationId tester = id.tester().id();
return controller.applications().getDeploymentEndpoints(new DeploymentId(tester, id.type().zone(controller.system())))
.flatMap(uris -> uris.stream()
.filter(uri -> uri.getHost().contains(String.format("%s--%s--%s.",
tester.instance().value(),
tester.application().value(),
tester.tenant().value())))
.findAny());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void notifyOfNewSubmission(ApplicationId id, long projectId, SourceRevision revision, long number) {
DeploymentJobs.JobReport report = new DeploymentJobs.JobReport(id,
JobType.component,
projectId,
number,
Optional.of(revision),
Optional.empty());
controller.applications().deploymentTrigger().notifyOfCompletion(report);
}
/** Locks and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
private void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} |
Thanks. | public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
       .filter(id -> ! applicationsToBuild.contains(id))
       .forEach(id -> {
           try {
               // The tester id is a pure function of the application id; compute it once.
               TesterId tester = TesterId.of(id);
               // Per-job-type state must be removed under the job's locks ...
               for (JobType type : jobs(id))
                   locked(id, type, deactivateTester, __ -> {
                       try (Lock ___ = curator.lock(id, type)) {
                           deactivateTester(tester, type);
                           curator.deleteRunData(id, type);
                           logs.delete(id);
                       }
                   });
               // ... while application-wide artifacts are independent of JobType, so they
               // are deleted once per application, outside the per-type loop and locks.
               controller.applications().applicationStore().removeAll(id);
               controller.applications().applicationStore().removeAll(tester);
           }
           catch (TimeoutException e) {
               return; // Failed to get a lock; leave the rest for the next GC pass.
           }
           curator.deleteRunData(id);
       });
} | controller.applications().applicationStore().removeAll(id); | public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
controller.applications().applicationStore().removeAll(id);
controller.applications().applicationStore().removeAll(tester);
}
catch (TimeoutException e) {
return;
}
curator.deleteRunData(id);
});
} | class JobController {
private static final int historyLength = 256;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
public JobController(Controller controller, RunDataStore runDataStore, TesterCloud testerCloud) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, runDataStore);
this.cloud = testerCloud;
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log records for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
locked(id, __ -> {
List<LogEntry> entries = messages.stream()
.map(message -> new LogEntry(0, controller.clock().millis(), LogEntry.typeOf(level), message))
.collect(Collectors.toList());
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log record for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
Map<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.putIfAbsent(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.filter(Optional::isPresent).map(Optional::get)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
Iterator<RunId> ids = runs.keySet().iterator();
for (RunId old = ids.next(); old.number() <= last - historyLength; old = ids.next()) {
logs.delete(old);
ids.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, long projectId,
byte[] packageBytes, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally()) {
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().putApplicationPackage(application.get().id(), appVersion, content);
});
}
long run = nextBuild(id);
version.set(ApplicationVersion.from(revision, run));
controller.applications().applicationStore().putApplicationPackage(id,
version.get(),
packageBytes);
controller.applications().applicationStore().putTesterPackage(TesterId.of(id),
version.get(),
testPackageBytes);
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().pruneApplicationPackages(id, oldestDeployed);
controller.applications().applicationStore().pruneTesterPackages(TesterId.of(id), oldestDeployed);
});
controller.applications().storeWithUpdatedConfig(application.withBuiltInternally(true), new ApplicationPackage(packageBytes));
notifyOfNewSubmission(id, projectId, revision, run);
});
return version.get();
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data, packages and tester deployments for applications which are unknown, or no longer built internally. */
public void deactivateTester(TesterId id, JobType type) {
try {
controller.configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NoInstanceException ignored) {
}
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
Optional<URI> testerEndpoint(RunId id) {
ApplicationId tester = id.tester().id();
return controller.applications().getDeploymentEndpoints(new DeploymentId(tester, id.type().zone(controller.system())))
.flatMap(uris -> uris.stream()
.filter(uri -> uri.getHost().contains(String.format("%s--%s--%s.",
tester.instance().value(),
tester.application().value(),
tester.tenant().value())))
.findAny());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void notifyOfNewSubmission(ApplicationId id, long projectId, SourceRevision revision, long number) {
DeploymentJobs.JobReport report = new DeploymentJobs.JobReport(id,
JobType.component,
projectId,
number,
Optional.of(revision),
Optional.empty());
controller.applications().deploymentTrigger().notifyOfCompletion(report);
}
/** Locks and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
private void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} | class JobController {
private static final int historyLength = 256;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
public JobController(Controller controller, RunDataStore runDataStore, TesterCloud testerCloud) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, runDataStore);
this.cloud = testerCloud;
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log records for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
locked(id, __ -> {
List<LogEntry> entries = messages.stream()
.map(message -> new LogEntry(0, controller.clock().millis(), LogEntry.typeOf(level), message))
.collect(Collectors.toList());
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log record for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
Map<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.putIfAbsent(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.filter(Optional::isPresent).map(Optional::get)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
Iterator<RunId> ids = runs.keySet().iterator();
for (RunId old = ids.next(); old.number() <= last - historyLength; old = ids.next()) {
logs.delete(old);
ids.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, long projectId,
byte[] packageBytes, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally()) {
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
}
long run = nextBuild(id);
version.set(ApplicationVersion.from(revision, run));
controller.applications().applicationStore().put(id,
version.get(),
packageBytes);
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id, oldestDeployed);
controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
});
controller.applications().storeWithUpdatedConfig(application.withBuiltInternally(true), new ApplicationPackage(packageBytes));
notifyOfNewSubmission(id, projectId, revision, run);
});
return version.get();
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data, packages and tester deployments for applications which are unknown, or no longer built internally. */
public void deactivateTester(TesterId id, JobType type) {
try {
controller.configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NoInstanceException ignored) {
}
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
Optional<URI> testerEndpoint(RunId id) {
ApplicationId tester = id.tester().id();
return controller.applications().getDeploymentEndpoints(new DeploymentId(tester, id.type().zone(controller.system())))
.flatMap(uris -> uris.stream()
.filter(uri -> uri.getHost().contains(String.format("%s--%s--%s.",
tester.instance().value(),
tester.application().value(),
tester.tenant().value())))
.findAny());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void notifyOfNewSubmission(ApplicationId id, long projectId, SourceRevision revision, long number) {
DeploymentJobs.JobReport report = new DeploymentJobs.JobReport(id,
JobType.component,
projectId,
number,
Optional.of(revision),
Optional.empty());
controller.applications().deploymentTrigger().notifyOfCompletion(report);
}
/** Locks and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
private void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} |
Yes, I will go over and consolidate QrStartConfig generation in the next PR. | public void getConfig(QrStartConfig.Builder builder) {
builder.jvm.heapsize(512);
builder.jvm.heapSizeAsPercentageOfPhysicalMemory(0);
builder.jvm.availableProcessors(2);
builder.jvm.verbosegc(false);
} | builder.jvm.heapSizeAsPercentageOfPhysicalMemory(0); | public void getConfig(QrStartConfig.Builder builder) {
builder.jvm
.verbosegc(false)
.availableProcessors(2)
.heapsize(512)
.heapSizeAsPercentageOfPhysicalMemory(0);
} | class ClusterControllerContainer extends Container implements
BundlesConfig.Producer,
ZookeeperServerConfig.Producer,
QrStartConfig.Producer
{
private static final ComponentSpecification CLUSTERCONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-apps");
private static final ComponentSpecification ZKFACADE_BUNDLE = new ComponentSpecification("zkfacade");
private final Set<String> bundles = new TreeSet<>();
public ClusterControllerContainer(AbstractConfigProducer parent, int index, boolean runStandaloneZooKeeper, boolean isHosted) {
super(parent, "" + index, index);
addHandler(
new Handler(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-status"),
new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StatusHandler"),
CLUSTERCONTROLLER_BUNDLE))), "clustercontroller-status/*"
);
addHandler(
new Handler(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-state-restapi-v2"),
new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StateRestApiV2Handler"),
CLUSTERCONTROLLER_BUNDLE))), "cluster/v2/*"
);
if (runStandaloneZooKeeper) {
addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-zkrunner"),
new ComponentSpecification("com.yahoo.vespa.zookeeper.ZooKeeperServer"), ZKFACADE_BUNDLE))));
addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-zkprovider"),
new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StandaloneZooKeeperProvider"), CLUSTERCONTROLLER_BUNDLE))));
} else {
addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-zkprovider"),
new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.DummyZooKeeperProvider"), CLUSTERCONTROLLER_BUNDLE))));
}
addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-apps-jar-with-dependencies.jar"));
addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-apputil-jar-with-dependencies.jar"));
addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-core-jar-with-dependencies.jar"));
addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-utils-jar-with-dependencies.jar"));
addBundle("file:" + getDefaults().underVespaHome("lib/jars/zkfacade-jar-with-dependencies.jar"));
log.log(LogLevel.DEBUG, "Adding access log for cluster controller ...");
addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, "controller", isHosted));
}
@Override
public int getWantedPort() {
return 19050;
}
@Override
public boolean requiresWantedPort() {
return index() == 0;
}
@Override
public ContainerServiceType myServiceType() {
return ContainerServiceType.CLUSTERCONTROLLER_CONTAINER;
}
private void addHandler(Handler h, String binding) {
h.addServerBindings("http:
super.addHandler(h);
}
public void addBundle(String bundlePath) {
bundles.add(bundlePath);
}
@Override
public void getConfig(BundlesConfig.Builder builder) {
for (String bundle : bundles) {
builder.bundle(bundle);
}
}
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
builder.myid(index());
}
@Override
} | class ClusterControllerContainer extends Container implements
BundlesConfig.Producer,
ZookeeperServerConfig.Producer,
QrStartConfig.Producer
{
private static final ComponentSpecification CLUSTERCONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-apps");
private static final ComponentSpecification ZKFACADE_BUNDLE = new ComponentSpecification("zkfacade");
private final Set<String> bundles = new TreeSet<>();
public ClusterControllerContainer(AbstractConfigProducer parent, int index, boolean runStandaloneZooKeeper, boolean isHosted) {
super(parent, "" + index, index);
addHandler(
new Handler(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-status"),
new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StatusHandler"),
CLUSTERCONTROLLER_BUNDLE))), "clustercontroller-status/*"
);
addHandler(
new Handler(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-state-restapi-v2"),
new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StateRestApiV2Handler"),
CLUSTERCONTROLLER_BUNDLE))), "cluster/v2/*"
);
if (runStandaloneZooKeeper) {
addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-zkrunner"),
new ComponentSpecification("com.yahoo.vespa.zookeeper.ZooKeeperServer"), ZKFACADE_BUNDLE))));
addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-zkprovider"),
new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StandaloneZooKeeperProvider"), CLUSTERCONTROLLER_BUNDLE))));
} else {
addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification(
new ComponentSpecification("clustercontroller-zkprovider"),
new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.DummyZooKeeperProvider"), CLUSTERCONTROLLER_BUNDLE))));
}
addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-apps-jar-with-dependencies.jar"));
addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-apputil-jar-with-dependencies.jar"));
addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-core-jar-with-dependencies.jar"));
addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-utils-jar-with-dependencies.jar"));
addBundle("file:" + getDefaults().underVespaHome("lib/jars/zkfacade-jar-with-dependencies.jar"));
log.log(LogLevel.DEBUG, "Adding access log for cluster controller ...");
addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, "controller", isHosted));
}
@Override
public int getWantedPort() {
return 19050;
}
@Override
public boolean requiresWantedPort() {
return index() == 0;
}
@Override
public ContainerServiceType myServiceType() {
return ContainerServiceType.CLUSTERCONTROLLER_CONTAINER;
}
private void addHandler(Handler h, String binding) {
h.addServerBindings("http:
super.addHandler(h);
}
public void addBundle(String bundlePath) {
bundles.add(bundlePath);
}
@Override
public void getConfig(BundlesConfig.Builder builder) {
for (String bundle : bundles) {
builder.bundle(bundle);
}
}
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
builder.myid(index());
}
@Override
} |
I would expect this to return the `new UnixPath(to)`? | public UnixPath atomicMove(Path to) {
uncheck(() -> Files.move(path, to, StandardCopyOption.ATOMIC_MOVE));
return this;
} | return this; | public UnixPath atomicMove(Path to) {
uncheck(() -> Files.move(path, to, StandardCopyOption.ATOMIC_MOVE));
return new UnixPath(to);
} | class UnixPath {
private final Path path;
public UnixPath(Path path) {
this.path = path;
}
public UnixPath(String path) {
this(Paths.get(path));
}
public Path toPath() {
return path;
}
public boolean createParents() {
Path parent = path.getParent();
if (Files.isDirectory(parent)) {
return false;
}
uncheck(() -> Files.createDirectories(parent));
return true;
}
public String readUtf8File() {
return new String(readBytes(), StandardCharsets.UTF_8);
}
public byte[] readBytes() {
return uncheck(() -> Files.readAllBytes(path));
}
public UnixPath writeUtf8File(String content, OpenOption... options) {
return writeBytes(content.getBytes(StandardCharsets.UTF_8), options);
}
public UnixPath writeBytes(byte[] content, OpenOption... options) {
uncheck(() -> Files.write(path, content, options));
return this;
}
public String getPermissions() {
return getAttributes().permissions();
}
/**
* @param permissions Example: "rwxr-x---" means rwx for owner, rx for group,
* and no permissions for others.
*/
public UnixPath setPermissions(String permissions) {
Set<PosixFilePermission> permissionSet = getPosixFilePermissionsFromString(permissions);
uncheck(() -> Files.setPosixFilePermissions(path, permissionSet));
return this;
}
public String getOwner() {
return getAttributes().owner();
}
public UnixPath setOwner(String owner) {
UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
UserPrincipal principal = uncheck(
() -> service.lookupPrincipalByName(owner),
"While looking up user %s", owner);
uncheck(() -> Files.setOwner(path, principal));
return this;
}
public String getGroup() {
return getAttributes().group();
}
public UnixPath setGroup(String group) {
UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
GroupPrincipal principal = uncheck(
() -> service.lookupPrincipalByGroupName(group),
"while looking up group %s", group);
uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal));
return this;
}
public Instant getLastModifiedTime() {
return getAttributes().lastModifiedTime();
}
public FileAttributes getAttributes() {
PosixFileAttributes attributes = uncheck(() ->
Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes());
return new FileAttributes(attributes);
}
public Optional<FileAttributes> getAttributesIfExists() {
return IOExceptionUtil.ifExists(this::getAttributes);
}
public UnixPath createNewFile() {
uncheck(() -> Files.createFile(path));
return this;
}
public UnixPath createNewFile(String permissions) {
FileAttribute<?> attribute = PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString(permissions));
uncheck(() -> Files.createFile(path, attribute));
return this;
}
public UnixPath createDirectory(String permissions) {
Set<PosixFilePermission> set = getPosixFilePermissionsFromString(permissions);
FileAttribute<Set<PosixFilePermission>> attribute = PosixFilePermissions.asFileAttribute(set);
uncheck(() -> Files.createDirectory(path, attribute));
return this;
}
public UnixPath createDirectory() {
uncheck(() -> Files.createDirectory(path));
return this;
}
public boolean isDirectory() {
return uncheck(() -> Files.isDirectory(path));
}
/**
* Similar to rm -rf file:
* - It's not an error if file doesn't exist
* - If file is a directory, it and all content is removed
* - For symlinks: Only the symlink is removed, not what the symlink points to
*/
public boolean deleteRecursively() {
if (isDirectory()) {
for (UnixPath path : listContentsOfDirectory()) {
path.deleteRecursively();
}
}
return deleteIfExists();
}
public boolean deleteIfExists() {
return uncheck(() -> Files.deleteIfExists(path));
}
public List<UnixPath> listContentsOfDirectory() {
try (Stream<Path> stream = Files.list(path)){
return stream
.map(UnixPath::new)
.collect(Collectors.toList());
} catch (NoSuchFileException ignored) {
return Collections.emptyList();
} catch (IOException e) {
throw new RuntimeException("Failed to list contents of directory " + path.toAbsolutePath(), e);
}
}
/** This path must be on the same file system as the to-path. */
public boolean moveIfExists(Path to) {
try {
Files.move(path, to);
return true;
} catch (NoSuchFileException ignored) {
return false;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public String toString() {
return path.toString();
}
private Set<PosixFilePermission> getPosixFilePermissionsFromString(String permissions) {
try {
return PosixFilePermissions.fromString(permissions);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Failed to set permissions '" +
permissions + "' on path " + path, e);
}
}
} | class UnixPath {
private final Path path;
public UnixPath(Path path) {
this.path = path;
}
public UnixPath(String path) {
this(Paths.get(path));
}
public Path toPath() {
return path;
}
public boolean createParents() {
Path parent = path.getParent();
if (Files.isDirectory(parent)) {
return false;
}
uncheck(() -> Files.createDirectories(parent));
return true;
}
public String readUtf8File() {
return new String(readBytes(), StandardCharsets.UTF_8);
}
public byte[] readBytes() {
return uncheck(() -> Files.readAllBytes(path));
}
public UnixPath writeUtf8File(String content, OpenOption... options) {
return writeBytes(content.getBytes(StandardCharsets.UTF_8), options);
}
public UnixPath writeBytes(byte[] content, OpenOption... options) {
uncheck(() -> Files.write(path, content, options));
return this;
}
public String getPermissions() {
return getAttributes().permissions();
}
/**
* @param permissions Example: "rwxr-x---" means rwx for owner, rx for group,
* and no permissions for others.
*/
public UnixPath setPermissions(String permissions) {
Set<PosixFilePermission> permissionSet = getPosixFilePermissionsFromString(permissions);
uncheck(() -> Files.setPosixFilePermissions(path, permissionSet));
return this;
}
public String getOwner() {
return getAttributes().owner();
}
public UnixPath setOwner(String owner) {
UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
UserPrincipal principal = uncheck(
() -> service.lookupPrincipalByName(owner),
"While looking up user %s", owner);
uncheck(() -> Files.setOwner(path, principal));
return this;
}
public String getGroup() {
return getAttributes().group();
}
public UnixPath setGroup(String group) {
UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
GroupPrincipal principal = uncheck(
() -> service.lookupPrincipalByGroupName(group),
"while looking up group %s", group);
uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal));
return this;
}
public Instant getLastModifiedTime() {
return getAttributes().lastModifiedTime();
}
public FileAttributes getAttributes() {
PosixFileAttributes attributes = uncheck(() ->
Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes());
return new FileAttributes(attributes);
}
public Optional<FileAttributes> getAttributesIfExists() {
return IOExceptionUtil.ifExists(this::getAttributes);
}
public UnixPath createNewFile() {
uncheck(() -> Files.createFile(path));
return this;
}
public UnixPath createNewFile(String permissions) {
FileAttribute<?> attribute = PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString(permissions));
uncheck(() -> Files.createFile(path, attribute));
return this;
}
public UnixPath createDirectory(String permissions) {
Set<PosixFilePermission> set = getPosixFilePermissionsFromString(permissions);
FileAttribute<Set<PosixFilePermission>> attribute = PosixFilePermissions.asFileAttribute(set);
uncheck(() -> Files.createDirectory(path, attribute));
return this;
}
public UnixPath createDirectory() {
uncheck(() -> Files.createDirectory(path));
return this;
}
public boolean isDirectory() {
return uncheck(() -> Files.isDirectory(path));
}
/**
* Similar to rm -rf file:
* - It's not an error if file doesn't exist
* - If file is a directory, it and all content is removed
* - For symlinks: Only the symlink is removed, not what the symlink points to
*/
public boolean deleteRecursively() {
if (isDirectory()) {
for (UnixPath path : listContentsOfDirectory()) {
path.deleteRecursively();
}
}
return uncheck(() -> Files.deleteIfExists(path));
}
public UnixPath deleteIfExists() {
uncheck(() -> Files.deleteIfExists(path));
return this;
}
public List<UnixPath> listContentsOfDirectory() {
try (Stream<Path> stream = Files.list(path)){
return stream
.map(UnixPath::new)
.collect(Collectors.toList());
} catch (NoSuchFileException ignored) {
return Collections.emptyList();
} catch (IOException e) {
throw new RuntimeException("Failed to list contents of directory " + path.toAbsolutePath(), e);
}
}
/** This path must be on the same file system as the to-path. Returns UnixPath of 'to'. */
public boolean moveIfExists(Path to) {
try {
Files.move(path, to);
return true;
} catch (NoSuchFileException ignored) {
return false;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public String toString() {
return path.toString();
}
private Set<PosixFilePermission> getPosixFilePermissionsFromString(String permissions) {
try {
return PosixFilePermissions.fromString(permissions);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Failed to set permissions '" +
permissions + "' on path " + path, e);
}
}
} |
Agree | public UnixPath atomicMove(Path to) {
uncheck(() -> Files.move(path, to, StandardCopyOption.ATOMIC_MOVE));
return this;
} | return this; | public UnixPath atomicMove(Path to) {
uncheck(() -> Files.move(path, to, StandardCopyOption.ATOMIC_MOVE));
return new UnixPath(to);
} | class UnixPath {
private final Path path;
public UnixPath(Path path) {
this.path = path;
}
public UnixPath(String path) {
this(Paths.get(path));
}
public Path toPath() {
return path;
}
public boolean createParents() {
Path parent = path.getParent();
if (Files.isDirectory(parent)) {
return false;
}
uncheck(() -> Files.createDirectories(parent));
return true;
}
public String readUtf8File() {
return new String(readBytes(), StandardCharsets.UTF_8);
}
public byte[] readBytes() {
return uncheck(() -> Files.readAllBytes(path));
}
public UnixPath writeUtf8File(String content, OpenOption... options) {
return writeBytes(content.getBytes(StandardCharsets.UTF_8), options);
}
public UnixPath writeBytes(byte[] content, OpenOption... options) {
uncheck(() -> Files.write(path, content, options));
return this;
}
public String getPermissions() {
return getAttributes().permissions();
}
/**
* @param permissions Example: "rwxr-x---" means rwx for owner, rx for group,
* and no permissions for others.
*/
public UnixPath setPermissions(String permissions) {
Set<PosixFilePermission> permissionSet = getPosixFilePermissionsFromString(permissions);
uncheck(() -> Files.setPosixFilePermissions(path, permissionSet));
return this;
}
public String getOwner() {
return getAttributes().owner();
}
public UnixPath setOwner(String owner) {
UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
UserPrincipal principal = uncheck(
() -> service.lookupPrincipalByName(owner),
"While looking up user %s", owner);
uncheck(() -> Files.setOwner(path, principal));
return this;
}
public String getGroup() {
return getAttributes().group();
}
public UnixPath setGroup(String group) {
UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
GroupPrincipal principal = uncheck(
() -> service.lookupPrincipalByGroupName(group),
"while looking up group %s", group);
uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal));
return this;
}
public Instant getLastModifiedTime() {
return getAttributes().lastModifiedTime();
}
public FileAttributes getAttributes() {
PosixFileAttributes attributes = uncheck(() ->
Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes());
return new FileAttributes(attributes);
}
public Optional<FileAttributes> getAttributesIfExists() {
return IOExceptionUtil.ifExists(this::getAttributes);
}
public UnixPath createNewFile() {
uncheck(() -> Files.createFile(path));
return this;
}
public UnixPath createNewFile(String permissions) {
FileAttribute<?> attribute = PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString(permissions));
uncheck(() -> Files.createFile(path, attribute));
return this;
}
public UnixPath createDirectory(String permissions) {
Set<PosixFilePermission> set = getPosixFilePermissionsFromString(permissions);
FileAttribute<Set<PosixFilePermission>> attribute = PosixFilePermissions.asFileAttribute(set);
uncheck(() -> Files.createDirectory(path, attribute));
return this;
}
public UnixPath createDirectory() {
uncheck(() -> Files.createDirectory(path));
return this;
}
public boolean isDirectory() {
return uncheck(() -> Files.isDirectory(path));
}
/**
* Similar to rm -rf file:
* - It's not an error if file doesn't exist
* - If file is a directory, it and all content is removed
* - For symlinks: Only the symlink is removed, not what the symlink points to
*/
public boolean deleteRecursively() {
if (isDirectory()) {
for (UnixPath path : listContentsOfDirectory()) {
path.deleteRecursively();
}
}
return deleteIfExists();
}
public boolean deleteIfExists() {
return uncheck(() -> Files.deleteIfExists(path));
}
public List<UnixPath> listContentsOfDirectory() {
try (Stream<Path> stream = Files.list(path)){
return stream
.map(UnixPath::new)
.collect(Collectors.toList());
} catch (NoSuchFileException ignored) {
return Collections.emptyList();
} catch (IOException e) {
throw new RuntimeException("Failed to list contents of directory " + path.toAbsolutePath(), e);
}
}
/** This path must be on the same file system as the to-path. */
public boolean moveIfExists(Path to) {
try {
Files.move(path, to);
return true;
} catch (NoSuchFileException ignored) {
return false;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public String toString() {
return path.toString();
}
private Set<PosixFilePermission> getPosixFilePermissionsFromString(String permissions) {
try {
return PosixFilePermissions.fromString(permissions);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Failed to set permissions '" +
permissions + "' on path " + path, e);
}
}
} | class UnixPath {
private final Path path;
public UnixPath(Path path) {
this.path = path;
}
public UnixPath(String path) {
this(Paths.get(path));
}
public Path toPath() {
return path;
}
/**
 * Creates the parent directories of this path if they are missing.
 *
 * @return true if directory creation was attempted, false if the parent already
 *         exists as a directory, or this path has no parent (a root or a bare
 *         single-element path)
 */
public boolean createParents() {
    Path parent = path.getParent();
    // Fix: Path.getParent() returns null for roots and single-element paths; the
    // original then threw NullPointerException from Files.isDirectory(null).
    if (parent == null || Files.isDirectory(parent)) {
        return false;
    }
    uncheck(() -> Files.createDirectories(parent));
    return true;
}
public String readUtf8File() {
return new String(readBytes(), StandardCharsets.UTF_8);
}
public byte[] readBytes() {
return uncheck(() -> Files.readAllBytes(path));
}
public UnixPath writeUtf8File(String content, OpenOption... options) {
return writeBytes(content.getBytes(StandardCharsets.UTF_8), options);
}
public UnixPath writeBytes(byte[] content, OpenOption... options) {
uncheck(() -> Files.write(path, content, options));
return this;
}
public String getPermissions() {
return getAttributes().permissions();
}
/**
* @param permissions Example: "rwxr-x---" means rwx for owner, rx for group,
* and no permissions for others.
*/
public UnixPath setPermissions(String permissions) {
Set<PosixFilePermission> permissionSet = getPosixFilePermissionsFromString(permissions);
uncheck(() -> Files.setPosixFilePermissions(path, permissionSet));
return this;
}
public String getOwner() {
return getAttributes().owner();
}
public UnixPath setOwner(String owner) {
UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
UserPrincipal principal = uncheck(
() -> service.lookupPrincipalByName(owner),
"While looking up user %s", owner);
uncheck(() -> Files.setOwner(path, principal));
return this;
}
public String getGroup() {
return getAttributes().group();
}
public UnixPath setGroup(String group) {
UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
GroupPrincipal principal = uncheck(
() -> service.lookupPrincipalByGroupName(group),
"while looking up group %s", group);
uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal));
return this;
}
public Instant getLastModifiedTime() {
return getAttributes().lastModifiedTime();
}
public FileAttributes getAttributes() {
PosixFileAttributes attributes = uncheck(() ->
Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes());
return new FileAttributes(attributes);
}
public Optional<FileAttributes> getAttributesIfExists() {
return IOExceptionUtil.ifExists(this::getAttributes);
}
public UnixPath createNewFile() {
uncheck(() -> Files.createFile(path));
return this;
}
public UnixPath createNewFile(String permissions) {
FileAttribute<?> attribute = PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString(permissions));
uncheck(() -> Files.createFile(path, attribute));
return this;
}
public UnixPath createDirectory(String permissions) {
Set<PosixFilePermission> set = getPosixFilePermissionsFromString(permissions);
FileAttribute<Set<PosixFilePermission>> attribute = PosixFilePermissions.asFileAttribute(set);
uncheck(() -> Files.createDirectory(path, attribute));
return this;
}
public UnixPath createDirectory() {
uncheck(() -> Files.createDirectory(path));
return this;
}
public boolean isDirectory() {
return uncheck(() -> Files.isDirectory(path));
}
/**
* Similar to rm -rf file:
* - It's not an error if file doesn't exist
* - If file is a directory, it and all content is removed
* - For symlinks: Only the symlink is removed, not what the symlink points to
*/
public boolean deleteRecursively() {
    // Depth-first: delete all children before attempting to delete this path itself.
    if (isDirectory()) {
        listContentsOfDirectory().forEach(UnixPath::deleteRecursively);
    }
    return uncheck(() -> Files.deleteIfExists(path));
}
/**
 * Deletes this file or empty directory if it exists; returns this for chaining.
 * The boolean result of Files.deleteIfExists is intentionally discarded.
 */
public UnixPath deleteIfExists() {
uncheck(() -> Files.deleteIfExists(path));
return this;
}
public List<UnixPath> listContentsOfDirectory() {
try (Stream<Path> stream = Files.list(path)){
return stream
.map(UnixPath::new)
.collect(Collectors.toList());
} catch (NoSuchFileException ignored) {
return Collections.emptyList();
} catch (IOException e) {
throw new RuntimeException("Failed to list contents of directory " + path.toAbsolutePath(), e);
}
}
/**
 * Moves this path to the given path, if this path exists. This path must be on the
 * same file system as the to-path.
 *
 * @return true if this path existed and was moved, false if this path did not exist
 * @throws UncheckedIOException if the move fails for any reason other than this path
 *         not existing
 */
public boolean moveIfExists(Path to) {
try {
Files.move(path, to);
return true;
} catch (NoSuchFileException ignored) {
return false;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public String toString() {
return path.toString();
}
private Set<PosixFilePermission> getPosixFilePermissionsFromString(String permissions) {
try {
return PosixFilePermissions.fromString(permissions);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Failed to set permissions '" +
permissions + "' on path " + path, e);
}
}
} |
You can just pass `list` here instead of creating the same `NodeList` | void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
ResourceCapacity wantedResourceCapacity = ResourceCapacity.of(getFlavor(requestedNodes));
NodeList list = new NodeList(allNodes);
for (Node node : allNodes) {
    // Only active, non-retiring Docker hosts can receive a new child node.
    if (node.type() != NodeType.host) continue;
    if (node.state() != Node.State.active) continue;
    if (node.status().wantToRetire()) continue;
    boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(node, wantedResourceCapacity);
    // Skip hosts that already run a node of this application's cluster.
    boolean conflictingCluster = list.childrenOf(node).owner(appId).asList().stream()
            .anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id()));
    if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue;
    log.log(LogLevel.DEBUG, "Trying to add new Docker node on " + node);
    // Fix: reuse the NodeList built above instead of constructing an identical
    // NodeList over allNodes on every host iteration.
    Optional<IP.Allocation> allocation = node.ipAddressPool().findAllocation(list);
    if (!allocation.isPresent()) continue;
    String hostname;
    try {
        hostname = allocation.get().resolveHostname(nameResolver);
    } catch (IllegalArgumentException e) {
        log.log(LogLevel.WARNING, "Failed to resolve hostname for allocation: " + allocation.get() + ", skipping", e);
        continue;
    }
    Node newNode = Node.createDockerNode("fake-" + hostname,
            allocation.get().addresses(),
            Collections.emptySet(),
            hostname,
            Optional.of(node.hostname()),
            getFlavor(requestedNodes),
            NodeType.tenant);
    PrioritizableNode nodePri = toNodePriority(newNode, false, true);
    if (!nodePri.violatesSpares || isAllocatingForReplacement) {
        log.log(LogLevel.DEBUG, "Adding new Docker node " + newNode);
        nodes.put(newNode, nodePri);
    }
}
} | Optional<IP.Allocation> allocation = node.ipAddressPool().findAllocation(new NodeList(allNodes)); | void addNewDockerNodes() {
// Nothing to do unless the deployment requests Docker-container nodes.
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
ResourceCapacity wantedResourceCapacity = ResourceCapacity.of(getFlavor(requestedNodes));
NodeList list = new NodeList(allNodes);
for (Node node : allNodes) {
// Only active, non-retiring Docker hosts can receive a new child node.
if (node.type() != NodeType.host) continue;
if (node.state() != Node.State.active) continue;
if (node.status().wantToRetire()) continue;
boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(node, wantedResourceCapacity);
// Skip hosts already running a node of this application's cluster.
boolean conflictingCluster = list.childrenOf(node).owner(appId).asList().stream()
.anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id()));
if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue;
log.log(LogLevel.DEBUG, "Trying to add new Docker node on " + node);
// Take a free IP address from the host's pool; skip the host if none remain.
Optional<IP.Allocation> allocation = node.ipAddressPool().findAllocation(list);
if (!allocation.isPresent()) continue;
String hostname;
try {
hostname = allocation.get().resolveHostname(nameResolver);
} catch (IllegalArgumentException e) {
log.log(LogLevel.WARNING, "Failed to resolve hostname for allocation: " + allocation.get() + ", skipping", e);
continue;
}
// NOTE(review): the "fake-" id presumably marks a not-yet-provisioned candidate
// node — confirm against the provisioning flow.
Node newNode = Node.createDockerNode("fake-" + hostname,
allocation.get().addresses(),
Collections.emptySet(),
hostname,
Optional.of(node.hostname()),
getFlavor(requestedNodes),
NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
// Candidates on spare hosts are only considered when allocating for replacement.
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
log.log(LogLevel.DEBUG, "Adding new Docker node " + newNode);
nodes.put(newNode, nodePri);
}
}
} | class NodePrioritizer {
private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName());
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final NameResolver nameResolver;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec,
int spares, NameResolver nameResolver) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.nameResolver = nameResolver;
this.spareHosts = findSpareHosts(allNodes, spares);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
/** Returns all candidate nodes, sorted by PrioritizableNode's natural order (PrioritizableNode::compare). */
List<PrioritizableNode> prioritize() {
    return nodes.values().stream()
            .sorted()
            .collect(Collectors.toList());
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
// Surplus nodes enter as existing (not new) candidates: isSurplusNode=true, isNewNode=false.
PrioritizableNode nodePri = toNodePriority(node, true, false);
// Candidates on spare hosts are only used when allocating for replacement.
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.map(node -> toNodePriority(node, false, false))
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Add nodes already provisioned, but not allocated to any application
*/
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> node.state().equals(Node.State.ready))
.map(node -> toNodePriority(node, false, false))
.filter(n -> !n.violatesSpares || isAllocatingForReplacement)
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
}
return pri;
}
/**
 * Returns whether the given node is the first candidate for relocation among all
 * children of the given parent, per compareForRelocation.
 * (The method name keeps an existing typo for caller compatibility.)
 */
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
    List<Node> children = new NodeList(nodes).childrenOf(parent).asList();
    return children.stream()
            .sorted(NodePrioritizer::compareForRelocation)
            .findFirst()
            .filter(n -> n.equals(node))
            .isPresent();
}
/**
 * Returns whether this allocation is replacing failed nodes: true when the wanted
 * node count exceeds the number of non-failed nodes currently in the cluster.
 */
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
    if (nodeFailedNodes == 0) return false;
    int wantedCount = 0;
    if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
        wantedCount = ((NodeSpec.CountNodeSpec) requestedNodes).getCount();
    }
    return wantedCount > nofNodesInCluster - nodeFailedNodes;
}
/** Returns the flavor specified by the node spec, or null if it is not a count spec. */
private static Flavor getFlavor(NodeSpec requestedNodes) {
    if (!(requestedNodes instanceof NodeSpec.CountNodeSpec)) return null;
    return ((NodeSpec.CountNodeSpec) requestedNodes).getFlavor();
}
/** Returns whether the requested flavor is a Docker container flavor. */
private boolean isDocker() {
    Flavor flavor = getFlavor(requestedNodes);
    if (flavor == null) return false;
    return flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
/**
 * Returns the parent (host) node of the given node, if it has one among allNodes.
 *
 * Fix: the original re-read the parentHostname Optional inside the filter with a
 * " NOT A NODE" sentinel even though presence was already checked; unwrap it once
 * instead of once per streamed node.
 */
private Optional<Node> findParentNode(Node node) {
    if (!node.parentHostname().isPresent()) return Optional.empty();
    String parentHostname = node.parentHostname().get();
    return allNodes.stream()
            .filter(n -> n.hostname().equals(parentHostname))
            .findAny();
}
/**
 * Orders nodes by preference for relocation: by resource capacity first, then
 * unallocated before allocated, then container-cluster members before members of
 * other cluster types, and finally by hostname for a stable, deterministic order.
 */
private static int compareForRelocation(Node a, Node b) {
// Compare resource footprints first.
int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacity != 0) return capacity;
// Unallocated nodes sort before allocated ones.
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
// Both allocated: container clusters sort before non-container clusters.
if (a.allocation().isPresent() && b.allocation().isPresent()) {
if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
!b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return -1;
if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return 1;
}
// Deterministic tie-break.
return a.hostname().compareTo(b.hostname());
}
} | class NodePrioritizer {
private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName());
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final NameResolver nameResolver;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec,
int spares, NameResolver nameResolver) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.nameResolver = nameResolver;
this.spareHosts = findSpareHosts(allNodes, spares);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.map(node -> toNodePriority(node, false, false))
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Add nodes already provisioned, but not allocated to any application
*/
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> node.state().equals(Node.State.ready))
.map(node -> toNodePriority(node, false, false))
.filter(n -> !n.violatesSpares || isAllocatingForReplacement)
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
}
return pri;
}
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
NodeList list = new NodeList(nodes);
return list.childrenOf(parent).asList().stream()
.sorted(NodePrioritizer::compareForRelocation)
.findFirst()
.filter(n -> n.equals(node))
.isPresent();
}
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
if (nodeFailedNodes == 0) return false;
int wantedCount = 0;
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
wantedCount = countSpec.getCount();
}
return (wantedCount > nofNodesInCluster - nodeFailedNodes);
}
private static Flavor getFlavor(NodeSpec requestedNodes) {
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
return countSpec.getFlavor();
}
return null;
}
private boolean isDocker() {
Flavor flavor = getFlavor(requestedNodes);
return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
private Optional<Node> findParentNode(Node node) {
if (!node.parentHostname().isPresent()) return Optional.empty();
return allNodes.stream()
.filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
.findAny();
}
private static int compareForRelocation(Node a, Node b) {
int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacity != 0) return capacity;
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
if (a.allocation().isPresent() && b.allocation().isPresent()) {
if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
!b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return -1;
if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return 1;
}
return a.hostname().compareTo(b.hostname());
}
} |
Thanks, fixed. | void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
ResourceCapacity wantedResourceCapacity = ResourceCapacity.of(getFlavor(requestedNodes));
NodeList list = new NodeList(allNodes);
for (Node node : allNodes) {
if (node.type() != NodeType.host) continue;
if (node.state() != Node.State.active) continue;
if (node.status().wantToRetire()) continue;
boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(node, wantedResourceCapacity);
boolean conflictingCluster = list.childrenOf(node).owner(appId).asList().stream()
.anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id()));
if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue;
log.log(LogLevel.DEBUG, "Trying to add new Docker node on " + node);
Optional<IP.Allocation> allocation = node.ipAddressPool().findAllocation(new NodeList(allNodes));
if (!allocation.isPresent()) continue;
String hostname;
try {
hostname = allocation.get().resolveHostname(nameResolver);
} catch (IllegalArgumentException e) {
log.log(LogLevel.WARNING, "Failed to resolve hostname for allocation: " + allocation.get() + ", skipping", e);
continue;
}
Node newNode = Node.createDockerNode("fake-" + hostname,
allocation.get().addresses(),
Collections.emptySet(),
hostname,
Optional.of(node.hostname()),
getFlavor(requestedNodes),
NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
log.log(LogLevel.DEBUG, "Adding new Docker node " + newNode);
nodes.put(newNode, nodePri);
}
}
} | Optional<IP.Allocation> allocation = node.ipAddressPool().findAllocation(new NodeList(allNodes)); | void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
ResourceCapacity wantedResourceCapacity = ResourceCapacity.of(getFlavor(requestedNodes));
NodeList list = new NodeList(allNodes);
for (Node node : allNodes) {
if (node.type() != NodeType.host) continue;
if (node.state() != Node.State.active) continue;
if (node.status().wantToRetire()) continue;
boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(node, wantedResourceCapacity);
boolean conflictingCluster = list.childrenOf(node).owner(appId).asList().stream()
.anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id()));
if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue;
log.log(LogLevel.DEBUG, "Trying to add new Docker node on " + node);
Optional<IP.Allocation> allocation = node.ipAddressPool().findAllocation(list);
if (!allocation.isPresent()) continue;
String hostname;
try {
hostname = allocation.get().resolveHostname(nameResolver);
} catch (IllegalArgumentException e) {
log.log(LogLevel.WARNING, "Failed to resolve hostname for allocation: " + allocation.get() + ", skipping", e);
continue;
}
Node newNode = Node.createDockerNode("fake-" + hostname,
allocation.get().addresses(),
Collections.emptySet(),
hostname,
Optional.of(node.hostname()),
getFlavor(requestedNodes),
NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
log.log(LogLevel.DEBUG, "Adding new Docker node " + newNode);
nodes.put(newNode, nodePri);
}
}
} | class NodePrioritizer {
private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName());
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final NameResolver nameResolver;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec,
int spares, NameResolver nameResolver) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.nameResolver = nameResolver;
this.spareHosts = findSpareHosts(allNodes, spares);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.map(node -> toNodePriority(node, false, false))
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Add nodes already provisioned, but not allocated to any application
*/
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> node.state().equals(Node.State.ready))
.map(node -> toNodePriority(node, false, false))
.filter(n -> !n.violatesSpares || isAllocatingForReplacement)
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
}
return pri;
}
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
NodeList list = new NodeList(nodes);
return list.childrenOf(parent).asList().stream()
.sorted(NodePrioritizer::compareForRelocation)
.findFirst()
.filter(n -> n.equals(node))
.isPresent();
}
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
if (nodeFailedNodes == 0) return false;
int wantedCount = 0;
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
wantedCount = countSpec.getCount();
}
return (wantedCount > nofNodesInCluster - nodeFailedNodes);
}
private static Flavor getFlavor(NodeSpec requestedNodes) {
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
return countSpec.getFlavor();
}
return null;
}
private boolean isDocker() {
Flavor flavor = getFlavor(requestedNodes);
return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
private Optional<Node> findParentNode(Node node) {
if (!node.parentHostname().isPresent()) return Optional.empty();
return allNodes.stream()
.filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
.findAny();
}
    /**
     * Relocation order for the children of a host: smaller resource capacity first,
     * then unallocated before allocated, then container-cluster members before other
     * allocated nodes, with hostname as the final tie breaker for determinism.
     */
    private static int compareForRelocation(Node a, Node b) {
        // Smaller resource capacity sorts first
        int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
        if (capacity != 0) return capacity;
        // Unallocated nodes sort before allocated ones
        if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
        if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
        // Among allocated nodes, container cluster members sort first
        if (a.allocation().isPresent() && b.allocation().isPresent()) {
            if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
                !b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
                return -1;
            if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
                b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
                return 1;
        }
        // Deterministic tie breaker
        return a.hostname().compareTo(b.hostname());
    }
} | class NodePrioritizer {
private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName());
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final NameResolver nameResolver;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
    /**
     * Captures an immutable snapshot of all nodes and precomputes the state the
     * prioritization needs: spare hosts, capacity, and whether this allocation is
     * replacing failed nodes in the target cluster.
     */
    NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec,
                    int spares, NameResolver nameResolver) {
        this.allNodes = Collections.unmodifiableList(allNodes);
        this.requestedNodes = nodeSpec;
        this.clusterSpec = clusterSpec;
        this.appId = appId;
        this.nameResolver = nameResolver;
        this.spareHosts = findSpareHosts(allNodes, spares);
        this.capacity = new DockerHostCapacity(allNodes);
        // Failed nodes owned by this application in the target cluster
        long nofFailedNodes = allNodes.stream()
                .filter(node -> node.state().equals(Node.State.failed))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(appId))
                .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                .count();
        // All nodes (any state) owned by this application in the target cluster
        long nofNodesInCluster = allNodes.stream()
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(appId))
                .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                .count();
        this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
        this.isDocker = isDocker();
    }
/**
     * Spare hosts are the hosts in the system with the most free capacity,
     * up to the configured number of spares (not a fixed two).
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
    /** Picks up to {@code spares} active docker hosts with a free IP and the most free capacity. */
    private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
        DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
        return nodes.stream()
                .filter(node -> node.type().equals(NodeType.host))
                .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
                .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
                .sorted(capacity::compareWithoutInactive)
                .limit(spares)
                .collect(Collectors.toSet());
    }
    /**
     * @return The list of nodes sorted by PrioritizableNode::compare
     */
    List<PrioritizableNode> prioritize() {
        List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
        Collections.sort(priorityList);
        return priorityList;
    }
    /**
     * Add nodes that have been previously reserved to the same application from
     * an earlier downsizing of a cluster
     */
    void addSurplusNodes(List<Node> surplusNodes) {
        for (Node node : surplusNodes) {
            PrioritizableNode nodePri = toNodePriority(node, true, false);
            // Skip candidates that would consume spare capacity, unless replacing failed nodes
            if (!nodePri.violatesSpares || isAllocatingForReplacement) {
                nodes.put(node, nodePri);
            }
        }
    }
    /*
     * NOTE(review): the javadoc immediately below ("Add a node on each docker host ...")
     * appears stale — no such method follows it in this section; confirm and remove.
     */
    /**
     * Add a node on each docker host with enough capacity for the requested flavor
     */
    /**
     * Add existing nodes allocated to the application
     */
    void addApplicationNodes() {
        // Only nodes in these states may rejoin the candidate set for an existing application
        List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
        allNodes.stream()
                .filter(node -> node.type().equals(requestedNodes.type()))
                .filter(node -> legalStates.contains(node.state()))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(appId))
                .map(node -> toNodePriority(node, false, false))
                .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
    }
    /**
     * Add nodes already provisioned, but not allocated to any application
     */
    void addReadyNodes() {
        allNodes.stream()
                .filter(node -> node.type().equals(requestedNodes.type()))
                .filter(node -> node.state().equals(Node.State.ready))
                .map(node -> toNodePriority(node, false, false))
                // Candidates on spare hosts are only used when replacing failed nodes
                .filter(n -> !n.violatesSpares || isAllocatingForReplacement)
                .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
    }
    /**
     * Convert a list of nodes to a list of node priorities. This includes finding, calculating
     * parameters to the priority sorting procedure.
     */
    private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
        PrioritizableNode pri = new PrioritizableNode();
        pri.node = node;
        pri.isSurplusNode = isSurplusNode;
        pri.isNewNode = isNewNode;
        pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
        pri.parent = findParentNode(node);
        if (pri.parent.isPresent()) {
            Node parent = pri.parent.get();
            pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
            if (spareHosts.contains(parent)) {
                pri.violatesSpares = true;
            }
        }
        return pri;
    }
    // NOTE(review): "Reloacted" is a typo for "Relocated"; kept because callers use this name.
    // Returns whether the node ranks first among parent's children by compareForRelocation.
    static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
        NodeList list = new NodeList(nodes);
        return list.childrenOf(parent).asList().stream()
                .sorted(NodePrioritizer::compareForRelocation)
                .findFirst()
                .filter(n -> n.equals(node))
                .isPresent();
    }
    /** True when some nodes failed and the wanted count exceeds the remaining non-failed nodes. */
    private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
        if (nodeFailedNodes == 0) return false;
        // Wanted count is only known for count-based node specs
        int wantedCount = 0;
        if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
            NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
            wantedCount = countSpec.getCount();
        }
        return (wantedCount > nofNodesInCluster - nodeFailedNodes);
    }
    /** Returns the flavor of a count-based node spec, or null for other spec kinds. */
    private static Flavor getFlavor(NodeSpec requestedNodes) {
        if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
            NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
            return countSpec.getFlavor();
        }
        return null;
    }
    /** Whether the requested flavor is a docker container flavor. */
    private boolean isDocker() {
        Flavor flavor = getFlavor(requestedNodes);
        return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
    }
    /** Resolves the parent host of a node; empty if it has no parent hostname or it is unknown. */
    private Optional<Node> findParentNode(Node node) {
        if (!node.parentHostname().isPresent()) return Optional.empty();
        return allNodes.stream()
                .filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
                .findAny();
    }
    /** Relocation order: less capacity first, then unallocated, then container cluster members, then hostname. */
    private static int compareForRelocation(Node a, Node b) {
        int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
        if (capacity != 0) return capacity;
        if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
        if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
        if (a.allocation().isPresent() && b.allocation().isPresent()) {
            if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
                !b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
                return -1;
            if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
                b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
                return 1;
        }
        return a.hostname().compareTo(b.hostname());
    }
} |
This log message is misleading: the try clause only collects the file references in use — it does not delete anything — so the message should describe a failure to get file references rather than a failed deletion (note also the misplaced quote in "for ': "). | public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath) {
if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory");
Set<String> fileReferencesInUse = new HashSet<>();
Set<ApplicationId> applicationIds = listApplications();
applicationIds.forEach(applicationId -> {
try {
Set<String> fileReferences = getApplication(applicationId).getModel().fileReferences()
.stream()
.map(FileReference::value)
.collect(Collectors.toSet());
fileReferencesInUse.addAll(fileReferences);
} catch (IllegalArgumentException e) {
log.log(LogLevel.WARNING, "Failed deleting unused file references for ': " + applicationId + "'", e);
}
});
log.log(LogLevel.DEBUG, "File references in use : " + fileReferencesInUse);
Set<String> fileReferencesOnDisk = new HashSet<>();
File[] filesOnDisk = fileReferencesPath.listFiles();
if (filesOnDisk != null)
fileReferencesOnDisk.addAll(Arrays.stream(filesOnDisk).map(File::getName).collect(Collectors.toSet()));
log.log(LogLevel.DEBUG, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk);
Instant instant = Instant.now().minus(Duration.ofDays(14));
Set<String> fileReferencesToDelete = fileReferencesOnDisk
.stream()
.filter(fileReference -> ! fileReferencesInUse.contains(fileReference))
.filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant))
.collect(Collectors.toSet());
if (fileReferencesToDelete.size() > 0) {
log.log(LogLevel.INFO, "Will delete file references not in use: " + fileReferencesToDelete);
fileReferencesToDelete.forEach(fileReference -> {
File file = new File(fileReferencesPath, fileReference);
if ( ! IOUtils.recursiveDeleteDir(file))
log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath());
});
}
return fileReferencesToDelete;
} | log.log(LogLevel.WARNING, "Failed deleting unused file references for ': " + applicationId + "'", e); | public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath) {
if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory");
Set<String> fileReferencesInUse = new HashSet<>();
Set<ApplicationId> applicationIds = listApplications();
applicationIds.forEach(applicationId -> {
try {
Set<String> fileReferences = getApplication(applicationId).getModel().fileReferences()
.stream()
.map(FileReference::value)
.collect(Collectors.toSet());
fileReferencesInUse.addAll(fileReferences);
} catch (IllegalArgumentException e) {
log.log(LogLevel.WARNING, "Failed deleting unused file references for ': " + applicationId + "'", e);
}
});
log.log(LogLevel.DEBUG, "File references in use : " + fileReferencesInUse);
Set<String> fileReferencesOnDisk = new HashSet<>();
File[] filesOnDisk = fileReferencesPath.listFiles();
if (filesOnDisk != null)
fileReferencesOnDisk.addAll(Arrays.stream(filesOnDisk).map(File::getName).collect(Collectors.toSet()));
log.log(LogLevel.DEBUG, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk);
Instant instant = Instant.now().minus(Duration.ofDays(14));
Set<String> fileReferencesToDelete = fileReferencesOnDisk
.stream()
.filter(fileReference -> ! fileReferencesInUse.contains(fileReference))
.filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant))
.collect(Collectors.toSet());
if (fileReferencesToDelete.size() > 0) {
log.log(LogLevel.INFO, "Will delete file references not in use: " + fileReferencesToDelete);
fileReferencesToDelete.forEach(fileReference -> {
File file = new File(fileReferencesPath, fileReference);
if ( ! IOUtils.recursiveDeleteDir(file))
log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath());
});
}
return fileReferencesToDelete;
} | class ApplicationRepository implements com.yahoo.config.provision.Deployer {
private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());
private final TenantRepository tenantRepository;
private final Optional<Provisioner> hostProvisioner;
private final ConfigConvergenceChecker convergeChecker;
private final HttpProxy httpProxy;
private final Clock clock;
private final DeployLogger logger = new SilentDeployLogger();
private final ConfigserverConfig configserverConfig;
private final FileDistributionStatus fileDistributionStatus;
private final Orchestrator orchestrator;
    /** Constructor for dependency injection; uses the system UTC clock and a default file distribution status. */
    @Inject
    public ApplicationRepository(TenantRepository tenantRepository,
                                 HostProvisionerProvider hostProvisionerProvider,
                                 ConfigConvergenceChecker configConvergenceChecker,
                                 HttpProxy httpProxy,
                                 ConfigserverConfig configserverConfig,
                                 Orchestrator orchestrator) {
        this(tenantRepository, hostProvisionerProvider.getHostProvisioner(),
             configConvergenceChecker, httpProxy, configserverConfig, orchestrator,
             Clock.systemUTC(), new FileDistributionStatus());
    }
    /** Convenience constructor with an explicit clock and a default configserver config. */
    public ApplicationRepository(TenantRepository tenantRepository,
                                 Provisioner hostProvisioner,
                                 Orchestrator orchestrator,
                                 Clock clock) {
        this(tenantRepository, hostProvisioner, orchestrator, clock, new ConfigserverConfig(new ConfigserverConfig.Builder()));
    }
    /** Convenience constructor that supplies default convergence checker and http proxy. */
    public ApplicationRepository(TenantRepository tenantRepository,
                                 Provisioner hostProvisioner,
                                 Orchestrator orchestrator,
                                 Clock clock,
                                 ConfigserverConfig configserverConfig) {
        this(tenantRepository, Optional.of(hostProvisioner), new ConfigConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()),
             configserverConfig, orchestrator, clock, new FileDistributionStatus());
    }
    /** Canonical constructor: all collaborators are passed in explicitly. */
    private ApplicationRepository(TenantRepository tenantRepository,
                                  Optional<Provisioner> hostProvisioner,
                                  ConfigConvergenceChecker configConvergenceChecker,
                                  HttpProxy httpProxy,
                                  ConfigserverConfig configserverConfig,
                                  Orchestrator orchestrator,
                                  Clock clock,
                                  FileDistributionStatus fileDistributionStatus) {
        this.tenantRepository = tenantRepository;
        this.hostProvisioner = hostProvisioner;
        this.convergeChecker = configConvergenceChecker;
        this.httpProxy = httpProxy;
        this.clock = clock;
        this.configserverConfig = configserverConfig;
        this.orchestrator = orchestrator;
        this.fileDistributionStatus = fileDistributionStatus;
    }
    /**
     * Prepares a local session for activation: validates that it is not already active,
     * runs model preparation against the currently active application set (if any), and
     * logs the resulting config change actions.
     *
     * @return the session id, config change actions and the deploy log
     */
    public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) {
        validateThatLocalSessionIsNotActive(tenant, sessionId);
        LocalSession session = getLocalSession(tenant, sessionId);
        ApplicationId applicationId = prepareParams.getApplicationId();
        Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId);
        Slime deployLog = createDeployLog();
        DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId);
        ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now);
        logConfigChangeActions(actions, logger);
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. ");
        return new PrepareResult(sessionId, actions, deployLog);
    }
    /** Prepares and then activates the given session. */
    public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams,
                                            boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
        PrepareResult result = prepare(tenant, sessionId, prepareParams, now);
        activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreLockFailure, ignoreSessionStaleFailure);
        return result;
    }
    /** Deploys a compressed application package with default failure handling, timestamped by the injected clock. */
    public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) {
        return deploy(in, prepareParams, false, false, clock.instant());
    }
    /** Decompresses the package to a temp dir (always cleaned up in finally) and deploys it. */
    public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams,
                                boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
        File tempDir = Files.createTempDir();
        PrepareResult prepareResult;
        try {
            prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now);
        } finally {
            cleanupTempDirectory(tempDir);
        }
        return prepareResult;
    }
public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) {
return deploy(applicationPackage, prepareParams, false, false, Instant.now());
}
    /** Creates a new session from the application directory, then prepares and activates it. */
    public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams,
                                boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
        ApplicationId applicationId = prepareParams.getApplicationId();
        long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage);
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        return prepareAndActivate(tenant, sessionId, prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now);
    }
/**
* Creates a new deployment from the active application, if available.
* This is used for system internal redeployments, not on application package changes.
*
* @param application the active application to be redeployed
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) {
        return deployFromLocalActive(application, false); // not a bootstrap deployment
    }
/**
* Creates a new deployment from the active application, if available.
* This is used for system internal redeployments, not on application package changes.
*
* @param application the active application to be redeployed
* @param bootstrap the deployment is done when bootstrapping
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
                                                                                 boolean bootstrap) {
        // Allow the full ZooKeeper barrier timeout plus a small margin per operation
        return deployFromLocalActive(application,
                                     Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)),
                                     bootstrap);
    }
/**
* Creates a new deployment from the active application, if available.
* This is used for system internal redeployments, not on application package changes.
*
* @param application the active application to be redeployed
* @param timeout the timeout to use for each individual deployment operation
* @param bootstrap the deployment is done when bootstrapping
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
                                                                                 Duration timeout,
                                                                                 boolean bootstrap) {
        Tenant tenant = tenantRepository.getTenant(application.tenant());
        if (tenant == null) return Optional.empty();
        LocalSession activeSession = getActiveSession(tenant, application);
        if (activeSession == null) return Optional.empty();
        TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
        // Clone the active session so this redeployment gets its own session id
        LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, true, timeoutBudget);
        tenant.getLocalSessionRepo().addSession(newSession);
        Version version = decideVersion(application, zone().environment(), newSession.getVespaVersion(), bootstrap);
        return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock,
                                                 false /* don't validate as this is already deployed */, version,
                                                 bootstrap));
    }
@Override
public Optional<Instant> lastDeployTime(ApplicationId application) {
Tenant tenant = tenantRepository.getTenant(application.tenant());
if (tenant == null) return Optional.empty();
LocalSession activeSession = getActiveSession(tenant, application);
if (activeSession == null) return Optional.empty();
return Optional.of(Instant.ofEpochSecond(activeSession.getCreateTime()));
}
public ApplicationId activate(Tenant tenant,
long sessionId,
TimeoutBudget timeoutBudget,
boolean ignoreLockFailure,
boolean ignoreSessionStaleFailure) {
LocalSession localSession = getLocalSession(tenant, sessionId);
Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft());
deployment.setIgnoreLockFailure(ignoreLockFailure);
deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
deployment.activate();
return localSession.getApplicationId();
}
private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) {
return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false);
}
/**
* Deletes an application
*
* @return true if the application was found and deleted, false if it was not present
* @throws RuntimeException if the delete transaction fails. This method is exception safe.
*/
public boolean delete(ApplicationId applicationId) {
return configserverConfig.deleteApplicationLegacy() ? deleteApplicationLegacy(applicationId) : deleteApplication(applicationId);
}
/**
* Deletes an application
*
* @return true if the application was found and deleted, false if it was not present
* @throws RuntimeException if the delete transaction fails. This method is exception safe.
*/
    boolean deleteApplication(ApplicationId applicationId) {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        if (tenant == null) return false;
        TenantApplications tenantApplications = tenant.getApplicationRepo();
        if (!tenantApplications.listApplications().contains(applicationId)) return false;
        // Commit deletion of the remote session, then wait for it to disappear
        // (presumably local sessions are removed as a consequence — confirm against RemoteSessionRepo)
        long sessionId = tenantApplications.getSessionIdForApplication(applicationId);
        RemoteSession remoteSession = getRemoteSession(tenant, sessionId);
        remoteSession.createDeleteTransaction().commit();
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted");
        Duration waitTime = Duration.ofSeconds(60);
        if (localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) {
            log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted");
        } else {
            log.log(LogLevel.ERROR, TenantRepository.logPre(applicationId) + "Session " + sessionId + " was not deleted (waited " + waitTime + ")");
            return false;
        }
        // Remove rotations, the application entry and any provisioned hosts in one transaction
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(new Rotations(tenant.getCurator(), tenant.getPath()).delete(applicationId));
        transaction.add(tenantApplications.deleteApplication(applicationId));
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
        transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
        transaction.commit();
        return true;
    }
/**
* Deletes an application the legacy way (if there is more than one config server, the call needs to be done
* on the config server the application was deployed)
*
* @return true if the application was found and deleted, false if it was not present
* @throws RuntimeException if the delete transaction fails. This method is exception safe.
*/
    boolean deleteApplicationLegacy(ApplicationId applicationId) {
        Optional<Tenant> owner = Optional.ofNullable(tenantRepository.getTenant(applicationId.tenant()));
        if (!owner.isPresent()) return false;
        TenantApplications tenantApplications = owner.get().getApplicationRepo();
        if (!tenantApplications.listApplications().contains(applicationId)) return false;
        long sessionId = tenantApplications.getSessionIdForApplication(applicationId);
        LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo();
        LocalSession session = localSessionRepo.getSession(sessionId);
        if (session == null) return false;
        // Remove the local session, rotations, the application entry and any
        // provisioned hosts in a single transaction
        NestedTransaction transaction = new NestedTransaction();
        localSessionRepo.removeSession(session.getSessionId(), transaction);
        session.delete(transaction);
        transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId));
        transaction.add(tenantApplications.deleteApplication(applicationId));
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
        transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
        transaction.commit();
        return true;
    }
public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) {
String relativePath = "clustercontroller-status/" + pathSuffix;
return httpProxy.get(getApplication(applicationId), hostName, "container-clustercontroller", relativePath);
}
    /** Returns the current config generation of the given application. */
    public Long getApplicationGeneration(ApplicationId applicationId) {
        return getApplication(applicationId).getApplicationGeneration();
    }
    /** Asks the provisioner (if present) to restart the application's hosts matching the filter. */
    public void restart(ApplicationId applicationId, HostFilter hostFilter) {
        hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
    }
    /** Whether the application is currently suspended in the orchestrator. */
    public boolean isSuspended(ApplicationId application) {
        return orchestrator.getAllSuspendedApplications().contains(application);
    }
    /** Returns the file distribution status for the application, waiting at most timeout. */
    public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) {
        return fileDistributionStatus.status(getApplication(applicationId), timeout);
    }
    /** Returns a file from the given session's application package. */
    public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
        Tenant tenant = tenantRepository.getTenant(tenantName);
        return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
    }
    /** Loads the active application model for the given id; logs and rethrows any failure. */
    private Application getApplication(ApplicationId applicationId) {
        try {
            Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
            if (tenant == null) throw new IllegalArgumentException("Tenant '" + applicationId.tenant() + "' not found");
            long sessionId = getSessionIdForApplication(tenant, applicationId);
            RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0);
            return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant());
        } catch (Exception e) {
            // Broad catch is intentional at this boundary: log with context, then rethrow unchanged
            log.log(LogLevel.WARNING, "Failed getting application for '" + applicationId + "'", e);
            throw e;
        }
    }
Set<ApplicationId> listApplications() {
return tenantRepository.getAllTenants().stream()
.flatMap(tenant -> tenant.getApplicationRepo().listApplications().stream())
.collect(Collectors.toSet());
}
private boolean isFileLastModifiedBefore(File fileReference, Instant instant) {
BasicFileAttributes fileAttributes;
try {
fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class);
return fileAttributes.lastModifiedTime().toInstant().isBefore(instant);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) {
RemoteSessionRepo remoteSessionRepo = tenantRepository.getTenant(applicationId.tenant()).getRemoteSessionRepo();
Instant end = Instant.now().plus(waitTime);
do {
if (remoteSessionRepo.getSession(sessionId) == null) return true;
try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */}
} while (Instant.now().isBefore(end));
return false;
}
    /** Checks config convergence for a single service of the application. */
    public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri, Duration timeout) {
        return convergeChecker.checkService(getApplication(applicationId), hostAndPort, uri, timeout);
    }
    /** Lists the services whose config convergence should be checked for the application. */
    public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri, Duration timeoutPerService) {
        return convergeChecker.servicesToCheck(getApplication(applicationId), uri, timeoutPerService);
    }
public HttpResponse getLogs(ApplicationId applicationId, String apiParams) {
String logServerURI = getLogServerURI(applicationId) + apiParams;
LogRetriever logRetriever = new LogRetriever();
return logRetriever.getLogs(logServerURI);
}
/**
* Gets the active Session for the given application id.
*
* @return the active session, or null if there is no active session for the given application id.
*/
    public LocalSession getActiveSession(ApplicationId applicationId) {
        return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId);
    }
    /** Returns the session id registered for the application in the tenant's application repo. */
    public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (applicationRepo == null)
            throw new IllegalArgumentException("Application repo for tenant '" + tenant.getName() + "' not found");
        return applicationRepo.getSessionIdForApplication(applicationId);
    }
public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) {
RemoteSession session = getRemoteSession(tenant, sessionId);
if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalStateException("Session is active: " + sessionId);
}
}
public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) {
RemoteSession session = getRemoteSession(tenant, sessionId);
if ( ! Session.Status.PREPARE.equals(session.getStatus()))
throw new IllegalStateException("Session not prepared: " + sessionId);
}
    /** Creates a new local session as a copy of the application's existing session and returns its id. */
    public long createSessionFromExisting(ApplicationId applicationId,
                                          DeployLogger logger,
                                          boolean internalRedeploy,
                                          TimeoutBudget timeoutBudget) {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
        SessionFactory sessionFactory = tenant.getSessionFactory();
        LocalSession fromSession = getExistingSession(tenant, applicationId);
        LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget);
        localSessionRepo.addSession(session);
        return session.getSessionId();
    }
/**
 * Creates a session from a compressed application stream and returns its id.
 * The temporary decompression directory is always cleaned up.
 */
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
    File tempDir = Files.createTempDir();
    try {
        return createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
    } finally {
        cleanupTempDirectory(tempDir);
    }
}
/** Creates a session from an application directory, registers it, and returns its id. */
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    LocalSession created = tenant.getSessionFactory().createSession(applicationDirectory, applicationId, timeoutBudget);
    tenant.getLocalSessionRepo().addSession(created);
    return created.getSessionId();
}
/** Purges expired local sessions for every known application; missing tenants are logged. */
public void deleteExpiredLocalSessions() {
    for (ApplicationId app : listApplications()) {
        Tenant tenant = tenantRepository.getTenant(app.tenant());
        if (tenant == null) {
            log.log(LogLevel.WARNING, "Cannot delete expired local sessions for tenant '" + app.tenant() + "', tenant not found");
            continue;
        }
        tenant.getLocalSessionRepo().purgeOldSessions();
    }
}
/** Deletes expired remote sessions for every known application and returns how many were removed. */
public int deleteExpiredRemoteSessions(Duration expiryTime) {
    int deleted = 0;
    for (ApplicationId app : listApplications()) {
        Tenant tenant = tenantRepository.getTenant(app.tenant());
        if (tenant == null) {
            log.log(LogLevel.WARNING, "Cannot delete expired remote sessions for tenant '" + app.tenant() + "', tenant not found");
            continue;
        }
        deleted += tenant.getRemoteSessionRepo().deleteExpiredSessions(expiryTime);
    }
    return deleted;
}
/**
 * Deletes tenants that have no active applications and were created before
 * {@code now - ttlForUnusedTenant}. The default and hosted-vespa tenants are
 * never deleted.
 *
 * @return the names of the tenants that were deleted
 */
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
    // Collect the candidates first, then delete: the original performed the deletion
    // inside Stream.peek, which is documented as a debugging aid and is not a reliable
    // place for side effects.
    Set<TenantName> unused = tenantRepository.getAllTenantNames().stream()
            .filter(tenantName -> activeApplications(tenantName).isEmpty())
            .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
            .filter(tenantName -> !tenantName.equals(TenantRepository.HOSTED_VESPA_TENANT))
            .filter(tenantName -> tenantRepository.getTenant(tenantName).getCreatedTime().isBefore(now.minus(ttlForUnusedTenant)))
            .collect(Collectors.toSet());
    unused.forEach(tenantRepository::deleteTenant);
    return unused;
}
/** Deletes the given tenant; fails when the tenant still has active applications. */
public void deleteTenant(TenantName tenantName) {
    List<ApplicationId> active = activeApplications(tenantName);
    if ( ! active.isEmpty())
        throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + active);
    tenantRepository.deleteTenant(tenantName);
}
/** Lists the application ids registered for the given tenant. */
private List<ApplicationId> activeApplications(TenantName tenantName) {
    Tenant tenant = tenantRepository.getTenant(tenantName);
    return tenant.getApplicationRepo().listApplications();
}
/**
 * Verifies that both the tenant and the application exist.
 *
 * @return the tenant owning the application
 * @throws IllegalArgumentException when either is missing
 */
public Tenant verifyTenantAndApplication(ApplicationId applicationId) {
    TenantName tenantName = applicationId.tenant();
    if ( ! tenantRepository.checkThatTenantExists(tenantName))
        throw new IllegalArgumentException("Tenant " + tenantName + " was not found.");
    Tenant tenant = tenantRepository.getTenant(tenantName);
    if ( ! listApplicationIds(tenant).contains(applicationId))
        throw new IllegalArgumentException("No such application id: " + applicationId);
    return tenant;
}
/** Returns the metadata of the local session with the given id. */
public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) {
    LocalSession session = getLocalSession(tenant, sessionId);
    return session.getMetaData();
}
/** Returns this config server's own configuration. */
public ConfigserverConfig configserverConfig() {
    return configserverConfig;
}
/** Throws IllegalStateException when the local session with the given id is active. */
private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
    Session.Status status = getLocalSession(tenant, sessionId).getStatus();
    if (Session.Status.ACTIVATE.equals(status))
        throw new IllegalStateException("Session is active: " + sessionId);
}
/** Looks up a local session by id, throwing NotFoundException when it does not exist. */
private LocalSession getLocalSession(Tenant tenant, long sessionId) {
    LocalSession found = tenant.getLocalSessionRepo().getSession(sessionId);
    if (found == null)
        throw new NotFoundException("Session " + sessionId + " was not found");
    return found;
}
/** Looks up a remote session by id, throwing NotFoundException when it does not exist. */
private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
    RemoteSession found = tenant.getRemoteSessionRepo().getSession(sessionId);
    if (found == null)
        throw new NotFoundException("Session " + sessionId + " was not found");
    return found;
}
/**
 * Returns the currently active application set for the given application, or empty
 * when the application has no active session. Best-effort: a missing session id
 * (signalled by IllegalArgumentException from the application repo) is treated as
 * "not active" rather than an error.
 */
private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    try {
        long activeSessionId = applicationRepo.getSessionIdForApplication(appId);
        RemoteSession activeSession = getRemoteSession(tenant, activeSessionId);
        if (activeSession != null)
            return Optional.ofNullable(activeSession.ensureApplicationLoaded());
    } catch (IllegalArgumentException ignored) {
        // Intentionally ignored: no session exists for this application, so it is not active.
    }
    return Optional.empty();
}
/** Decompresses the given request body into tempDir; I/O failures become IllegalArgumentException. */
private File decompressApplication(InputStream in, String contentType, File tempDir) {
    try (CompressedApplicationInputStream stream =
                 CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
        return decompressApplication(stream, tempDir);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to decompress data in body", e);
    }
}
/** Decompresses the application stream into tempDir; I/O failures become IllegalArgumentException. */
private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
    try {
        return in.decompress(tempDir);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to decompress stream", e);
    }
}
/** Lists all application ids known to the given tenant. */
private List<ApplicationId> listApplicationIds(Tenant tenant) {
    return tenant.getApplicationRepo().listApplications();
}
/** Best-effort removal of a temporary directory; failure is only logged. */
private void cleanupTempDirectory(File tempDir) {
    logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'");
    boolean deleted = IOUtils.recursiveDeleteDir(tempDir);
    if ( ! deleted)
        logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
}
/** Returns the local session the application currently runs from; throws when missing. */
private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
    long sessionId = tenant.getApplicationRepo().getSessionIdForApplication(applicationId);
    return getLocalSession(tenant, sessionId);
}
/** Returns the active local session for the application, or null when it is not active here. */
private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    if ( ! applicationRepo.listApplications().contains(applicationId)) return null;
    long sessionId = applicationRepo.getSessionIdForApplication(applicationId);
    return tenant.getLocalSessionRepo().getSession(sessionId);
}
/** Logs restart and re-feed actions implied by a config change, at the appropriate levels. */
private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) {
    RestartActions restartActions = actions.getRestartActions();
    if ( ! restartActions.isEmpty())
        logger.log(Level.WARNING,
                   "Change(s) between active and new application that require restart:\n" + restartActions.format());

    RefeedActions refeedActions = actions.getRefeedActions();
    if ( ! refeedActions.isEmpty()) {
        boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed);
        // Warn only when some re-feed action is not explicitly allowed.
        Level level = allAllowed ? Level.INFO : Level.WARNING;
        logger.log(level,
                   "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format());
    }
}
private String getLogServerURI(ApplicationId applicationId) {
Application application = getApplication(applicationId);
Collection<HostInfo> hostInfos = application.getModel().getHosts();
HostInfo logServerHostInfo = hostInfos.stream()
.filter(host -> host.getServices().stream()
.filter(serviceInfo ->
serviceInfo.getServiceType().equalsIgnoreCase("logserver"))
.count() > 0)
.findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HostInfo for LogServer"));
ServiceInfo containerServiceInfo = logServerHostInfo.getServices().stream()
.filter(service -> service.getServiceType().equals("container"))
.findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host"));
int port = containerServiceInfo.getPorts().stream()
.filter(portInfo -> portInfo.getTags().stream()
.filter(tag -> tag.equalsIgnoreCase("http")).count() > 0)
.findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port"))
.getPort();
return "http:
}
/** Returns version to use when deploying application in given environment */
static Version decideVersion(ApplicationId application, Environment environment, Version sessionVersion, boolean bootstrap) {
    boolean useCurrentVersion = environment.isManuallyDeployed()
                                && ! "hosted-vespa".equals(application.tenant().value())
                                && ! application.instance().isTester()
                                && ! bootstrap;
    return useCurrentVersion ? Vtag.currentVersion : sessionVersion;
}
/** Creates an empty deploy log (a Slime with an empty root object). */
public Slime createDeployLog() {
    Slime slime = new Slime();
    slime.setObject();
    return slime;
}
/** Returns the zone (system, environment, region) this config server serves, from its config. */
public Zone zone() {
    return new Zone(SystemName.from(configserverConfig.system()),
                    Environment.from(configserverConfig.environment()),
                    RegionName.from(configserverConfig.region()));
}
} | class ApplicationRepository implements com.yahoo.config.provision.Deployer {
// Class-level logger for repository operations.
private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());
// Per-tenant state: session repos, application repos, session factories.
private final TenantRepository tenantRepository;
// Empty when this config server has no host provisioner (see HostProvisionerProvider).
private final Optional<Provisioner> hostProvisioner;
private final ConfigConvergenceChecker convergeChecker;
private final HttpProxy httpProxy;
// Injected clock so time-dependent behavior is testable.
private final Clock clock;
// Discards deploy output; used for internal operations such as temp-dir cleanup.
private final DeployLogger logger = new SilentDeployLogger();
private final ConfigserverConfig configserverConfig;
private final FileDistributionStatus fileDistributionStatus;
private final Orchestrator orchestrator;
/** Constructor used by dependency injection; uses the system UTC clock. */
@Inject
public ApplicationRepository(TenantRepository tenantRepository,
                             HostProvisionerProvider hostProvisionerProvider,
                             ConfigConvergenceChecker configConvergenceChecker,
                             HttpProxy httpProxy,
                             ConfigserverConfig configserverConfig,
                             Orchestrator orchestrator) {
    this(tenantRepository, hostProvisionerProvider.getHostProvisioner(),
         configConvergenceChecker, httpProxy, configserverConfig, orchestrator,
         Clock.systemUTC(), new FileDistributionStatus());
}

/** Convenience constructor using a default ConfigserverConfig. */
public ApplicationRepository(TenantRepository tenantRepository,
                             Provisioner hostProvisioner,
                             Orchestrator orchestrator,
                             Clock clock) {
    this(tenantRepository, hostProvisioner, orchestrator, clock, new ConfigserverConfig(new ConfigserverConfig.Builder()));
}

/** Constructor with an explicit, always-present host provisioner. */
public ApplicationRepository(TenantRepository tenantRepository,
                             Provisioner hostProvisioner,
                             Orchestrator orchestrator,
                             Clock clock,
                             ConfigserverConfig configserverConfig) {
    this(tenantRepository, Optional.of(hostProvisioner), new ConfigConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()),
         configserverConfig, orchestrator, clock, new FileDistributionStatus());
}

/** Canonical constructor that all the public constructors delegate to. */
private ApplicationRepository(TenantRepository tenantRepository,
                              Optional<Provisioner> hostProvisioner,
                              ConfigConvergenceChecker configConvergenceChecker,
                              HttpProxy httpProxy,
                              ConfigserverConfig configserverConfig,
                              Orchestrator orchestrator,
                              Clock clock,
                              FileDistributionStatus fileDistributionStatus) {
    this.tenantRepository = tenantRepository;
    this.hostProvisioner = hostProvisioner;
    this.convergeChecker = configConvergenceChecker;
    this.httpProxy = httpProxy;
    this.clock = clock;
    this.configserverConfig = configserverConfig;
    this.orchestrator = orchestrator;
    this.fileDistributionStatus = fileDistributionStatus;
}
/**
 * Prepares the given session: builds the application models and config, comparing against
 * the currently active application set to compute config change actions.
 * Fails when the session is already active.
 */
public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) {
    validateThatLocalSessionIsNotActive(tenant, sessionId);
    LocalSession session = getLocalSession(tenant, sessionId);
    ApplicationId applicationId = prepareParams.getApplicationId();
    Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId);
    Slime deployLog = createDeployLog();
    // Deploy output is collected into the "log" array of the returned Slime.
    DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId);
    ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now);
    logConfigChangeActions(actions, logger);
    log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. ");
    return new PrepareResult(sessionId, actions, deployLog);
}

/** Prepares and then activates the given session as a single operation. */
public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams,
                                        boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
    PrepareResult result = prepare(tenant, sessionId, prepareParams, now);
    activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreLockFailure, ignoreSessionStaleFailure);
    return result;
}

/** Deploys a compressed application package with default failure handling, using the injected clock. */
public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) {
    return deploy(in, prepareParams, false, false, clock.instant());
}

/** Decompresses the package to a temp dir, deploys it, and always removes the temp dir. */
public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams,
                            boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
    File tempDir = Files.createTempDir();
    PrepareResult prepareResult;
    try {
        prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now);
    } finally {
        cleanupTempDirectory(tempDir);
    }
    return prepareResult;
}
/**
 * Deploys the given application package with default (non-ignoring) failure handling.
 * Uses the injected clock for the deployment timestamp for consistency with the
 * stream-based overload (the original called Instant.now(), bypassing the injected
 * clock and making this path untestable with a fixed clock).
 */
public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) {
    return deploy(applicationPackage, prepareParams, false, false, clock.instant());
}
/** Creates a session from the application package, then prepares and activates it. */
public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams,
                            boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
    ApplicationId applicationId = prepareParams.getApplicationId();
    long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage);
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    return prepareAndActivate(tenant, sessionId, prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now);
}
/**
 * Creates a new deployment from the active application, if available.
 * This is used for system internal redeployments, not on application package changes.
 *
 * @param application the active application to be redeployed
 * @return a new deployment from the local active, or empty if a local active application
 *         was not present for this id (meaning it either is not active or active on another
 *         node in the config server cluster)
 */
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) {
    return deployFromLocalActive(application, false);
}

/**
 * Creates a new deployment from the active application, if available.
 * This is used for system internal redeployments, not on application package changes.
 *
 * @param application the active application to be redeployed
 * @param bootstrap the deployment is done when bootstrapping
 * @return a new deployment from the local active, or empty if a local active application
 *         was not present for this id (meaning it either is not active or active on another
 *         node in the config server cluster)
 */
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
                                                                             boolean bootstrap) {
    // Default timeout: the ZooKeeper barrier timeout plus a little slack.
    return deployFromLocalActive(application,
                                 Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)),
                                 bootstrap);
}

/**
 * Creates a new deployment from the active application, if available.
 * This is used for system internal redeployments, not on application package changes.
 *
 * @param application the active application to be redeployed
 * @param timeout the timeout to use for each individual deployment operation
 * @param bootstrap the deployment is done when bootstrapping
 * @return a new deployment from the local active, or empty if a local active application
 *         was not present for this id (meaning it either is not active or active on another
 *         node in the config server cluster)
 */
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
                                                                             Duration timeout,
                                                                             boolean bootstrap) {
    Tenant tenant = tenantRepository.getTenant(application.tenant());
    if (tenant == null) return Optional.empty();
    LocalSession activeSession = getActiveSession(tenant, application);
    if (activeSession == null) return Optional.empty();
    TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
    // Copy the active session into a new session and deploy from the copy.
    LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, true, timeoutBudget);
    tenant.getLocalSessionRepo().addSession(newSession);
    Version version = decideVersion(application, zone().environment(), newSession.getVespaVersion(), bootstrap);
    return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock,
                                             false /* don't validate as this is already deployed */, version,
                                             bootstrap));
}
/** Returns the creation time of the application's active session, or empty when none exists. */
@Override
public Optional<Instant> lastDeployTime(ApplicationId application) {
    return Optional.ofNullable(tenantRepository.getTenant(application.tenant()))
                   .map(tenant -> getActiveSession(tenant, application))
                   .map(session -> Instant.ofEpochSecond(session.getCreateTime()));
}
/** Activates the prepared session with the given id and returns the activated application's id. */
public ApplicationId activate(Tenant tenant,
                              long sessionId,
                              TimeoutBudget timeoutBudget,
                              boolean ignoreLockFailure,
                              boolean ignoreSessionStaleFailure) {
    LocalSession localSession = getLocalSession(tenant, sessionId);
    Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft());
    deployment.setIgnoreLockFailure(ignoreLockFailure);
    deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
    deployment.activate();
    return localSession.getApplicationId();
}

// Creates a Deployment for a session that is already prepared (no validation pass).
private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) {
    return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false);
}
/**
 * Deletes an application
 *
 * @return true if the application was found and deleted, false if it was not present
 * @throws RuntimeException if the delete transaction fails. This method is exception safe.
 */
public boolean delete(ApplicationId applicationId) {
    // Config decides between the legacy (local-session) and the remote-session delete path.
    return configserverConfig.deleteApplicationLegacy() ? deleteApplicationLegacy(applicationId) : deleteApplication(applicationId);
}

/**
 * Deletes an application
 *
 * @return true if the application was found and deleted, false if it was not present
 * @throws RuntimeException if the delete transaction fails. This method is exception safe.
 */
boolean deleteApplication(ApplicationId applicationId) {
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    if (tenant == null) return false;
    TenantApplications tenantApplications = tenant.getApplicationRepo();
    if (!tenantApplications.listApplications().contains(applicationId)) return false;
    // Delete the remote session, then wait for the local session to disappear as a consequence.
    long sessionId = tenantApplications.getSessionIdForApplication(applicationId);
    RemoteSession remoteSession = getRemoteSession(tenant, sessionId);
    remoteSession.createDeleteTransaction().commit();
    log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted");
    Duration waitTime = Duration.ofSeconds(60);
    if (localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) {
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted");
    } else {
        log.log(LogLevel.ERROR, TenantRepository.logPre(applicationId) + "Session " + sessionId + " was not deleted (waited " + waitTime + ")");
        return false;
    }
    // Remove rotations, the application entry, and any provisioned hosts in one transaction.
    NestedTransaction transaction = new NestedTransaction();
    transaction.add(new Rotations(tenant.getCurator(), tenant.getPath()).delete(applicationId));
    transaction.add(tenantApplications.deleteApplication(applicationId));
    hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
    transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
    transaction.commit();
    return true;
}
/**
 * Deletes an application the legacy way (if there is more than one config server, the call needs to be done
 * on the config server the application was deployed)
 *
 * @return true if the application was found and deleted, false if it was not present
 * @throws RuntimeException if the delete transaction fails. This method is exception safe.
 */
boolean deleteApplicationLegacy(ApplicationId applicationId) {
    Optional<Tenant> owner = Optional.ofNullable(tenantRepository.getTenant(applicationId.tenant()));
    if (!owner.isPresent()) return false;
    TenantApplications tenantApplications = owner.get().getApplicationRepo();
    if (!tenantApplications.listApplications().contains(applicationId)) return false;
    long sessionId = tenantApplications.getSessionIdForApplication(applicationId);
    // The local session only exists on the config server the application was deployed on.
    LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo();
    LocalSession session = localSessionRepo.getSession(sessionId);
    if (session == null) return false;
    // Remove the session, rotations, the application entry, and provisioned hosts transactionally.
    NestedTransaction transaction = new NestedTransaction();
    localSessionRepo.removeSession(session.getSessionId(), transaction);
    session.delete(transaction);
    transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId));
    transaction.add(tenantApplications.deleteApplication(applicationId));
    hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
    transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
    transaction.commit();
    return true;
}
/** Proxies a cluster controller status page request to the given host. */
public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) {
    String relativePath = "clustercontroller-status/" + pathSuffix;
    return httpProxy.get(getApplication(applicationId), hostName, "container-clustercontroller", relativePath);
}

/** Returns the config generation of the given application. */
public Long getApplicationGeneration(ApplicationId applicationId) {
    return getApplication(applicationId).getApplicationGeneration();
}

/** Restarts the services matched by the filter; no-op without a host provisioner. */
public void restart(ApplicationId applicationId, HostFilter hostFilter) {
    hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
}

/** Returns whether the orchestrator currently lists the application as suspended. */
public boolean isSuspended(ApplicationId application) {
    return orchestrator.getAllSuspendedApplications().contains(application);
}

/** Returns the file distribution status for the application. */
public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) {
    return fileDistributionStatus.status(getApplication(applicationId), timeout);
}

/** Returns a file from the given session's application package. */
public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
    Tenant tenant = tenantRepository.getTenant(tenantName);
    return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
}
/** Loads the active application model for the given id; failures are logged and rethrown. */
private Application getApplication(ApplicationId applicationId) {
    try {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        if (tenant == null) throw new IllegalArgumentException("Tenant '" + applicationId.tenant() + "' not found");
        long sessionId = getSessionIdForApplication(tenant, applicationId);
        RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0);
        return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant());
    } catch (Exception e) {
        log.log(LogLevel.WARNING, "Failed getting application for '" + applicationId + "'", e);
        throw e;
    }
}
/** Returns the ids of all applications across all tenants. */
Set<ApplicationId> listApplications() {
    return tenantRepository.getAllTenants()
                           .stream()
                           .map(tenant -> tenant.getApplicationRepo())
                           .flatMap(applicationRepo -> applicationRepo.listApplications().stream())
                           .collect(Collectors.toSet());
}
/**
 * Returns whether the file was last modified before the given instant.
 * NOTE(review): readAttributes is presumably statically imported from java.nio.file.Files —
 * confirm against the file's import block (not visible here).
 *
 * @throws UncheckedIOException if the file's attributes cannot be read
 */
private boolean isFileLastModifiedBefore(File fileReference, Instant instant) {
    BasicFileAttributes fileAttributes;
    try {
        fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class);
        return fileAttributes.lastModifiedTime().toInstant().isBefore(instant);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Polls until the remote session with the given id disappears or the wait time elapses.
 *
 * @return true if the session was deleted within the wait time, false on timeout or interrupt
 */
private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) {
    RemoteSessionRepo remoteSessionRepo = tenantRepository.getTenant(applicationId.tenant()).getRemoteSessionRepo();
    Instant end = Instant.now().plus(waitTime);
    do {
        if (remoteSessionRepo.getSession(sessionId) == null) return true;
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            // Restore the interrupt flag and give up waiting, instead of swallowing the
            // interrupt and busy-looping as the original did.
            Thread.currentThread().interrupt();
            return false;
        }
    } while (Instant.now().isBefore(end));
    return false;
}
/** Checks one service for config convergence to the application's current generation. */
public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri, Duration timeout) {
    return convergeChecker.checkService(getApplication(applicationId), hostAndPort, uri, timeout);
}

/** Lists the services whose config convergence should be checked for the application. */
public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri, Duration timeoutPerService) {
    return convergeChecker.servicesToCheck(getApplication(applicationId), uri, timeoutPerService);
}

/** Fetches logs from the application's log server. */
public HttpResponse getLogs(ApplicationId applicationId, String apiParams) {
    String logServerURI = getLogServerURI(applicationId) + apiParams;
    LogRetriever logRetriever = new LogRetriever();
    return logRetriever.getLogs(logServerURI);
}

/**
 * Gets the active Session for the given application id.
 *
 * @return the active session, or null if there is no active session for the given application id.
 */
public LocalSession getActiveSession(ApplicationId applicationId) {
    return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId);
}

/** Returns the id of the session the given application is currently based on. */
public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    if (applicationRepo == null)
        throw new IllegalArgumentException("Application repo for tenant '" + tenant.getName() + "' not found");
    return applicationRepo.getSessionIdForApplication(applicationId);
}

/** Throws IllegalStateException when the remote session with the given id is active. */
public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) {
    RemoteSession session = getRemoteSession(tenant, sessionId);
    if (Session.Status.ACTIVATE.equals(session.getStatus())) {
        throw new IllegalStateException("Session is active: " + sessionId);
    }
}

/** Throws IllegalStateException unless the remote session with the given id is prepared. */
public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) {
    RemoteSession session = getRemoteSession(tenant, sessionId);
    if ( ! Session.Status.PREPARE.equals(session.getStatus()))
        throw new IllegalStateException("Session not prepared: " + sessionId);
}

/** Creates a new local session as a copy of the application's existing session; returns its id. */
public long createSessionFromExisting(ApplicationId applicationId,
                                      DeployLogger logger,
                                      boolean internalRedeploy,
                                      TimeoutBudget timeoutBudget) {
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
    SessionFactory sessionFactory = tenant.getSessionFactory();
    LocalSession fromSession = getExistingSession(tenant, applicationId);
    LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget);
    localSessionRepo.addSession(session);
    return session.getSessionId();
}

/** Creates a session from a compressed application stream; the temp dir is always removed. */
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
    File tempDir = Files.createTempDir();
    long sessionId;
    try {
        sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
    } finally {
        cleanupTempDirectory(tempDir);
    }
    return sessionId;
}

/** Creates a session from an application directory, registers it, and returns its id. */
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
    SessionFactory sessionFactory = tenant.getSessionFactory();
    LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget);
    localSessionRepo.addSession(session);
    return session.getSessionId();
}

/** Purges expired local sessions for every known application; missing tenants are logged. */
public void deleteExpiredLocalSessions() {
    listApplications().forEach(app -> {
        Tenant tenant = tenantRepository.getTenant(app.tenant());
        if (tenant == null)
            log.log(LogLevel.WARNING, "Cannot delete expired local sessions for tenant '" + app.tenant() + "', tenant not found");
        else
            tenant.getLocalSessionRepo().purgeOldSessions();
    });
}

/** Deletes expired remote sessions for every known application; returns how many were removed. */
public int deleteExpiredRemoteSessions(Duration expiryTime) {
    return listApplications()
            .stream()
            .map(app -> {
                Tenant tenant = tenantRepository.getTenant(app.tenant());
                if (tenant == null) {
                    log.log(LogLevel.WARNING, "Cannot delete expired remote sessions for tenant '" + app.tenant() + "', tenant not found");
                    return 0;
                } else {
                    return tenant.getRemoteSessionRepo().deleteExpiredSessions(expiryTime);
                }
            })
            .mapToInt(i -> i)
            .sum();
}
/**
 * Deletes tenants that have no active applications and were created before
 * {@code now - ttlForUnusedTenant}. The default and hosted-vespa tenants are
 * never deleted.
 *
 * @return the names of the tenants that were deleted
 */
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
    // Collect the candidates first, then delete: the original performed the deletion
    // inside Stream.peek, which is documented as a debugging aid and is not a reliable
    // place for side effects.
    Set<TenantName> unused = tenantRepository.getAllTenantNames().stream()
            .filter(tenantName -> activeApplications(tenantName).isEmpty())
            .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
            .filter(tenantName -> !tenantName.equals(TenantRepository.HOSTED_VESPA_TENANT))
            .filter(tenantName -> tenantRepository.getTenant(tenantName).getCreatedTime().isBefore(now.minus(ttlForUnusedTenant)))
            .collect(Collectors.toSet());
    unused.forEach(tenantRepository::deleteTenant);
    return unused;
}
public void deleteTenant(TenantName tenantName) {
List<ApplicationId> activeApplications = activeApplications(tenantName);
if (activeApplications.isEmpty())
tenantRepository.deleteTenant(tenantName);
else
throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications);
}
private List<ApplicationId> activeApplications(TenantName tenantName) {
return tenantRepository.getTenant(tenantName).getApplicationRepo().listApplications();
}
public Tenant verifyTenantAndApplication(ApplicationId applicationId) {
TenantName tenantName = applicationId.tenant();
if (!tenantRepository.checkThatTenantExists(tenantName)) {
throw new IllegalArgumentException("Tenant " + tenantName + " was not found.");
}
Tenant tenant = tenantRepository.getTenant(tenantName);
List<ApplicationId> applicationIds = listApplicationIds(tenant);
if (!applicationIds.contains(applicationId)) {
throw new IllegalArgumentException("No such application id: " + applicationId);
}
return tenant;
}
public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) {
return getLocalSession(tenant, sessionId).getMetaData();
}
public ConfigserverConfig configserverConfig() {
return configserverConfig;
}
private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
LocalSession session = getLocalSession(tenant, sessionId);
if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalStateException("Session is active: " + sessionId);
}
}
private LocalSession getLocalSession(Tenant tenant, long sessionId) {
LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId);
if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
return session;
}
private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
return session;
}
private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
TenantApplications applicationRepo = tenant.getApplicationRepo();
try {
long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId);
RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
if (currentActiveSession != null) {
currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
}
} catch (IllegalArgumentException e) {
}
return currentActiveApplicationSet;
}
private File decompressApplication(InputStream in, String contentType, File tempDir) {
try (CompressedApplicationInputStream application =
CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
return decompressApplication(application, tempDir);
} catch (IOException e) {
throw new IllegalArgumentException("Unable to decompress data in body", e);
}
}
private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
try {
return in.decompress(tempDir);
} catch (IOException e) {
throw new IllegalArgumentException("Unable to decompress stream", e);
}
}
private List<ApplicationId> listApplicationIds(Tenant tenant) {
TenantApplications applicationRepo = tenant.getApplicationRepo();
return applicationRepo.listApplications();
}
/** Recursively deletes the given temporary directory, logging a warning on failure. */
private void cleanupTempDirectory(File tempDir) {
    logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'");
    boolean deleted = IOUtils.recursiveDeleteDir(tempDir);
    if ( ! deleted) {
        logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
    }
}
/** Returns the local session backing the given application, failing if it does not exist. */
private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
    long sessionId = tenant.getApplicationRepo().getSessionIdForApplication(applicationId);
    return getLocalSession(tenant, sessionId);
}
/** Returns the active local session for the given application, or null if it has none. */
private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    if ( ! applicationRepo.listApplications().contains(applicationId)) {
        return null;
    }
    return tenant.getLocalSessionRepo().getSession(applicationRepo.getSessionIdForApplication(applicationId));
}
/** Logs the restart and re-feed actions implied by a config change, if there are any. */
private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) {
    RestartActions restartActions = actions.getRestartActions();
    if ( ! restartActions.isEmpty()) {
        logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" +
                                  restartActions.format());
    }
    RefeedActions refeedActions = actions.getRefeedActions();
    if ( ! refeedActions.isEmpty()) {
        // Re-feed is only worth a warning when at least one entry is not allowed.
        Level level = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed)
                      ? Level.INFO
                      : Level.WARNING;
        logger.log(level, "Change(s) between active and new application that may require re-feed:\n" +
                          refeedActions.format());
    }
}
private String getLogServerURI(ApplicationId applicationId) {
Application application = getApplication(applicationId);
Collection<HostInfo> hostInfos = application.getModel().getHosts();
HostInfo logServerHostInfo = hostInfos.stream()
.filter(host -> host.getServices().stream()
.filter(serviceInfo ->
serviceInfo.getServiceType().equalsIgnoreCase("logserver"))
.count() > 0)
.findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HostInfo for LogServer"));
ServiceInfo containerServiceInfo = logServerHostInfo.getServices().stream()
.filter(service -> service.getServiceType().equals("container"))
.findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host"));
int port = containerServiceInfo.getPorts().stream()
.filter(portInfo -> portInfo.getTags().stream()
.filter(tag -> tag.equalsIgnoreCase("http")).count() > 0)
.findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port"))
.getPort();
return "http:
}
/** Returns version to use when deploying application in given environment */
static Version decideVersion(ApplicationId application, Environment environment, Version sessionVersion, boolean bootstrap) {
    // Manual deployments follow the config server's own version, except for the
    // hosted-vespa tenant, tester instances, and bootstrap deployments.
    boolean useCurrentVersion = environment.isManuallyDeployed()
                                && ! "hosted-vespa".equals(application.tenant().value())
                                && ! application.instance().isTester()
                                && ! bootstrap;
    return useCurrentVersion ? Vtag.currentVersion : sessionVersion;
}
/** Creates an empty deploy log whose root is an object. */
public Slime createDeployLog() {
    Slime log = new Slime();
    log.setObject();
    return log;
}
/** Returns the zone (system, environment, region) this config server is running in. */
public Zone zone() {
    SystemName system = SystemName.from(configserverConfig.system());
    Environment environment = Environment.from(configserverConfig.environment());
    RegionName region = RegionName.from(configserverConfig.region());
    return new Zone(system, environment, region);
}
} |
What is the `SSLContext`'s behavior if there is no trust store provided? I.e. should we enforce its presence? | private static SSLContext createSslContext(TransportSecurityOptions options) {
SslContextBuilder builder = new SslContextBuilder();
options.getCertificatesFile()
.ifPresent(certificates -> builder.withKeyStore(options.getPrivateKeyFile().get(), certificates));
options.getCaCertificatesFile().ifPresent(builder::withTrustStore);
return builder.build();
} | options.getCaCertificatesFile().ifPresent(builder::withTrustStore); | private static SSLContext createSslContext(TransportSecurityOptions options) {
SslContextBuilder builder = new SslContextBuilder();
options.getCertificatesFile()
.ifPresent(certificates -> builder.withKeyStore(options.getPrivateKeyFile().get(), certificates));
options.getCaCertificatesFile().ifPresent(builder::withTrustStore);
return builder.build();
} | class TlsCryptoEngine implements CryptoEngine {
private final SSLContext sslContext;
public TlsCryptoEngine(SSLContext sslContext) {
this.sslContext = sslContext;
}
public TlsCryptoEngine(TransportSecurityOptions options) {
this(createSslContext(options));
}
@Override
public TlsCryptoSocket createCryptoSocket(SocketChannel channel, boolean isServer) {
SSLEngine sslEngine = sslContext.createSSLEngine();
// Mutual TLS: the peer must always present a certificate.
sslEngine.setNeedClientAuth(true);
// Client mode is the inverse of the server flag.
sslEngine.setUseClientMode(!isServer);
return new TlsCryptoSocket(channel, sslEngine);
}
} | class TlsCryptoEngine implements CryptoEngine {
private final SSLContext sslContext;
public TlsCryptoEngine(SSLContext sslContext) {
this.sslContext = sslContext;
}
public TlsCryptoEngine(TransportSecurityOptions options) {
this(createSslContext(options));
}
@Override
public TlsCryptoSocket createCryptoSocket(SocketChannel channel, boolean isServer) {
SSLEngine sslEngine = sslContext.createSSLEngine();
sslEngine.setNeedClientAuth(true);
sslEngine.setUseClientMode(!isServer);
return new TlsCryptoSocket(channel, sslEngine);
}
} |
Java will use the cacerts bundle provided by the jdk installation in that scenario. | private static SSLContext createSslContext(TransportSecurityOptions options) {
SslContextBuilder builder = new SslContextBuilder();
options.getCertificatesFile()
.ifPresent(certificates -> builder.withKeyStore(options.getPrivateKeyFile().get(), certificates));
options.getCaCertificatesFile().ifPresent(builder::withTrustStore);
return builder.build();
} | options.getCaCertificatesFile().ifPresent(builder::withTrustStore); | private static SSLContext createSslContext(TransportSecurityOptions options) {
SslContextBuilder builder = new SslContextBuilder();
options.getCertificatesFile()
.ifPresent(certificates -> builder.withKeyStore(options.getPrivateKeyFile().get(), certificates));
options.getCaCertificatesFile().ifPresent(builder::withTrustStore);
return builder.build();
} | class TlsCryptoEngine implements CryptoEngine {
private final SSLContext sslContext;
public TlsCryptoEngine(SSLContext sslContext) {
this.sslContext = sslContext;
}
public TlsCryptoEngine(TransportSecurityOptions options) {
this(createSslContext(options));
}
@Override
public TlsCryptoSocket createCryptoSocket(SocketChannel channel, boolean isServer) {
SSLEngine sslEngine = sslContext.createSSLEngine();
sslEngine.setNeedClientAuth(true);
sslEngine.setUseClientMode(!isServer);
return new TlsCryptoSocket(channel, sslEngine);
}
} | class TlsCryptoEngine implements CryptoEngine {
private final SSLContext sslContext;
public TlsCryptoEngine(SSLContext sslContext) {
this.sslContext = sslContext;
}
public TlsCryptoEngine(TransportSecurityOptions options) {
this(createSslContext(options));
}
@Override
public TlsCryptoSocket createCryptoSocket(SocketChannel channel, boolean isServer) {
SSLEngine sslEngine = sslContext.createSSLEngine();
sslEngine.setNeedClientAuth(true);
sslEngine.setUseClientMode(!isServer);
return new TlsCryptoSocket(channel, sslEngine);
}
} |
👍 | private static PeerPolicy toPeerPolicy(AuthorizedPeer authorizedPeer) {
if (authorizedPeer.name == null) {
throw missingFieldException("name");
}
if (authorizedPeer.requiredCredentials.isEmpty()) {
throw missingFieldException("required-credentials");
}
return new PeerPolicy(authorizedPeer.name, toRequestPeerCredentials(authorizedPeer.requiredCredentials));
} | throw missingFieldException("name"); | private static PeerPolicy toPeerPolicy(AuthorizedPeer authorizedPeer) {
if (authorizedPeer.name == null) {
throw missingFieldException("name");
}
if (authorizedPeer.requiredCredentials.isEmpty()) {
throw missingFieldException("required-credentials");
}
return new PeerPolicy(authorizedPeer.name, toRequestPeerCredentials(authorizedPeer.requiredCredentials));
} | class TransportSecurityOptionsJsonSerializer {
private static final ObjectMapper mapper = new ObjectMapper();
/** Reads JSON from the given stream and converts it to TransportSecurityOptions. */
public TransportSecurityOptions deserialize(InputStream in) {
    try {
        return toTransportSecurityOptions(mapper.readValue(in, TransportSecurityOptionsEntity.class));
    }
    catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/** Writes the given options as JSON to the given stream. */
public void serialize(OutputStream out, TransportSecurityOptions options) {
    TransportSecurityOptionsEntity entity = toTransportSecurityOptionsEntity(options);
    try {
        mapper.writeValue(out, entity);
    }
    catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Converts the deserialized JSON entity to TransportSecurityOptions.
 * 'private-key' and 'certificates' must be configured together or not at all.
 */
private static TransportSecurityOptions toTransportSecurityOptions(TransportSecurityOptionsEntity entity) {
TransportSecurityOptions.Builder builder = new TransportSecurityOptions.Builder();
Files files = entity.files;
if (files != null) {
if (files.certificatesFile != null && files.privateKeyFile != null) {
builder.withCertificate(Paths.get(files.certificatesFile), Paths.get(files.privateKeyFile));
} else if (files.certificatesFile != null || files.privateKeyFile != null) {
// Exactly one of the pair was given — reject the configuration.
throw new IllegalArgumentException("Both 'private-key' and 'certificates' must be configured together");
}
if (files.caCertificatesFile != null) {
builder.withCaCertificate(Paths.get(files.caCertificatesFile));
}
}
List<AuthorizedPeer> authorizedPeersEntity = entity.authorizedPeers;
if (authorizedPeersEntity.size() > 0) {
builder.withAuthorizedPeers(new AuthorizedPeers(toPeerPolicies(authorizedPeersEntity)));
}
return builder.build();
}
/** Converts each serialized peer entry to its PeerPolicy, collected into a set. */
private static Set<PeerPolicy> toPeerPolicies(List<AuthorizedPeer> authorizedPeersEntity) {
return authorizedPeersEntity.stream()
.map(TransportSecurityOptionsJsonSerializer::toPeerPolicy)
.collect(toSet());
}
// Converts the serialized credential requirements for a single peer.
// NOTE(review): the name says "Request" but it produces RequiredPeerCredential — looks like
// a typo; renaming would touch its caller, so it is only flagged here.
private static List<RequiredPeerCredential> toRequestPeerCredentials(List<RequiredCredential> requiredCredentials) {
return requiredCredentials.stream()
.map(TransportSecurityOptionsJsonSerializer::toRequiredPeerCredential)
.collect(toList());
}
/**
 * Converts one serialized credential requirement, failing if a mandatory field is absent.
 * Uses missingFieldException(...) for consistency with toPeerPolicy, so the error message
 * reads "'field' missing" instead of just the bare field name.
 */
private static RequiredPeerCredential toRequiredPeerCredential(RequiredCredential requiredCredential) {
    if (requiredCredential.field == null) {
        throw missingFieldException("field");
    }
    if (requiredCredential.matchExpression == null) {
        throw missingFieldException("must-match");
    }
    return new RequiredPeerCredential(toField(requiredCredential.field), new HostGlobPattern(requiredCredential.matchExpression));
}
/** Maps the serialized credential field enum to its domain-model counterpart. */
private static RequiredPeerCredential.Field toField(CredentialField field) {
    if (field == CredentialField.CN) return RequiredPeerCredential.Field.CN;
    if (field == CredentialField.SAN_DNS) return RequiredPeerCredential.Field.SAN_DNS;
    throw new IllegalArgumentException("Invalid field type: " + field);
}
/** Converts TransportSecurityOptions back to the JSON entity form used for serialization. */
private static TransportSecurityOptionsEntity toTransportSecurityOptionsEntity(TransportSecurityOptions options) {
TransportSecurityOptionsEntity entity = new TransportSecurityOptionsEntity();
entity.files = new Files();
// Absent optionals simply leave the corresponding entity field null.
options.getCaCertificatesFile().ifPresent(value -> entity.files.caCertificatesFile = value.toString());
options.getCertificatesFile().ifPresent(value -> entity.files.certificatesFile = value.toString());
options.getPrivateKeyFile().ifPresent(value -> entity.files.privateKeyFile = value.toString());
options.getAuthorizedPeers().ifPresent( authorizedPeers -> {
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
AuthorizedPeer authorizedPeer = new AuthorizedPeer();
authorizedPeer.name = peerPolicy.peerName();
for (RequiredPeerCredential requiredPeerCredential : peerPolicy.requiredCredentials()) {
RequiredCredential requiredCredential = new RequiredCredential();
requiredCredential.field = toField(requiredPeerCredential.field());
requiredCredential.matchExpression = requiredPeerCredential.pattern().asString();
authorizedPeer.requiredCredentials.add(requiredCredential);
}
entity.authorizedPeers.add(authorizedPeer);
}
});
return entity;
}
/** Maps the domain-model credential field enum to its serialized counterpart. */
private static CredentialField toField(RequiredPeerCredential.Field field) {
switch (field) {
case CN: return CredentialField.CN;
case SAN_DNS: return CredentialField.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
/** Creates the exception used for every missing mandatory JSON field. */
private static IllegalArgumentException missingFieldException(String fieldName) {
    return new IllegalArgumentException("'" + fieldName + "' missing");
}
} | class TransportSecurityOptionsJsonSerializer {
private static final ObjectMapper mapper = new ObjectMapper();
public TransportSecurityOptions deserialize(InputStream in) {
try {
TransportSecurityOptionsEntity entity = mapper.readValue(in, TransportSecurityOptionsEntity.class);
return toTransportSecurityOptions(entity);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public void serialize(OutputStream out, TransportSecurityOptions options) {
try {
mapper.writeValue(out, toTransportSecurityOptionsEntity(options));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private static TransportSecurityOptions toTransportSecurityOptions(TransportSecurityOptionsEntity entity) {
TransportSecurityOptions.Builder builder = new TransportSecurityOptions.Builder();
Files files = entity.files;
if (files != null) {
if (files.certificatesFile != null && files.privateKeyFile != null) {
builder.withCertificates(Paths.get(files.certificatesFile), Paths.get(files.privateKeyFile));
} else if (files.certificatesFile != null || files.privateKeyFile != null) {
throw new IllegalArgumentException("Both 'private-key' and 'certificates' must be configured together");
}
if (files.caCertificatesFile != null) {
builder.withCaCertificates(Paths.get(files.caCertificatesFile));
}
}
List<AuthorizedPeer> authorizedPeersEntity = entity.authorizedPeers;
if (authorizedPeersEntity.size() > 0) {
builder.withAuthorizedPeers(new AuthorizedPeers(toPeerPolicies(authorizedPeersEntity)));
}
return builder.build();
}
private static Set<PeerPolicy> toPeerPolicies(List<AuthorizedPeer> authorizedPeersEntity) {
return authorizedPeersEntity.stream()
.map(TransportSecurityOptionsJsonSerializer::toPeerPolicy)
.collect(toSet());
}
private static List<RequiredPeerCredential> toRequestPeerCredentials(List<RequiredCredential> requiredCredentials) {
return requiredCredentials.stream()
.map(TransportSecurityOptionsJsonSerializer::toRequiredPeerCredential)
.collect(toList());
}
private static RequiredPeerCredential toRequiredPeerCredential(RequiredCredential requiredCredential) {
if (requiredCredential.field == null) {
throw missingFieldException("field");
}
if (requiredCredential.matchExpression == null) {
throw missingFieldException("must-match");
}
return new RequiredPeerCredential(toField(requiredCredential.field), new HostGlobPattern(requiredCredential.matchExpression));
}
private static RequiredPeerCredential.Field toField(CredentialField field) {
switch (field) {
case CN: return RequiredPeerCredential.Field.CN;
case SAN_DNS: return RequiredPeerCredential.Field.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static TransportSecurityOptionsEntity toTransportSecurityOptionsEntity(TransportSecurityOptions options) {
TransportSecurityOptionsEntity entity = new TransportSecurityOptionsEntity();
entity.files = new Files();
options.getCaCertificatesFile().ifPresent(value -> entity.files.caCertificatesFile = value.toString());
options.getCertificatesFile().ifPresent(value -> entity.files.certificatesFile = value.toString());
options.getPrivateKeyFile().ifPresent(value -> entity.files.privateKeyFile = value.toString());
options.getAuthorizedPeers().ifPresent( authorizedPeers -> {
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
AuthorizedPeer authorizedPeer = new AuthorizedPeer();
authorizedPeer.name = peerPolicy.peerName();
for (RequiredPeerCredential requiredPeerCredential : peerPolicy.requiredCredentials()) {
RequiredCredential requiredCredential = new RequiredCredential();
requiredCredential.field = toField(requiredPeerCredential.field());
requiredCredential.matchExpression = requiredPeerCredential.pattern().asString();
authorizedPeer.requiredCredentials.add(requiredCredential);
}
entity.authorizedPeers.add(authorizedPeer);
}
});
return entity;
}
private static CredentialField toField(RequiredPeerCredential.Field field) {
switch (field) {
case CN: return CredentialField.CN;
case SAN_DNS: return CredentialField.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static IllegalArgumentException missingFieldException(String fieldName) {
return new IllegalArgumentException(String.format("'%s' missing", fieldName));
}
} |
This is removed on Vespa 7 but can't be removed on master. | public Optional<Integer> activeNodes() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveNodes();
} | if ( ! pongPacket.isPresent()) return Optional.empty(); | public Optional<Integer> activeNodes() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveNodes();
} | class Pong {
private String pingInfo="";
private final List<ErrorMessage> errors = new ArrayList<>(1);
private final Optional<PongPacket> pongPacket;
private ElapsedTime elapsed = new ElapsedTime();
public Pong() {
this.pongPacket = Optional.empty();
}
public Pong(ErrorMessage error) {
errors.add(error);
this.pongPacket = Optional.empty();
}
public Pong(PongPacket pongPacket) {
this.pongPacket = Optional.of(pongPacket);
}
public void addError(ErrorMessage error) {
errors.add(error);
}
public ErrorMessage getError(int i) {
return errors.get(i);
}
public int getErrorSize() {
return errors.size();
}
/** Returns the number of active documents in the backend responding in this Pong, if available */
public Optional<Long> activeDocuments() {
    return pongPacket.flatMap(packet -> packet.getActiveDocuments());
}
/** Returns the number of nodes which responded to this Pong, if available */
public List<ErrorMessage> getErrors() {
return Collections.unmodifiableList(errors);
}
/** Returns whether there is an error or not */
public boolean badResponse() {
return ! errors.isEmpty();
}
/** Sets information about the ping used to produce this. This is included when returning the tostring of this. */
public void setPingInfo(String pingInfo) {
    this.pingInfo = (pingInfo == null) ? "" : pingInfo;
}
/** Returns information about the ping use, or "" (never null) if none */
public String getPingInfo() { return pingInfo; }
public ElapsedTime getElapsedTime() {
return elapsed;
}
/** Returns a string which included the ping info (if any) and any errors added to this */
@Override
public String toString() {
    StringBuilder result = new StringBuilder("Result of pinging");
    if ( ! pingInfo.isEmpty()) {
        result.append(" using ").append(pingInfo);
    }
    if ( ! errors.isEmpty()) {
        result.append(" ");
        // Comma-separate the errors, with no trailing separator.
        for (int i = 0; i < errors.size(); i++) {
            if (i > 0) result.append(", ");
            result.append(errors.get(i).toString());
        }
    }
    return result.toString();
}
} | class Pong {
private String pingInfo="";
private final List<ErrorMessage> errors = new ArrayList<>(1);
private final Optional<PongPacket> pongPacket;
private ElapsedTime elapsed = new ElapsedTime();
public Pong() {
this.pongPacket = Optional.empty();
}
public Pong(ErrorMessage error) {
errors.add(error);
this.pongPacket = Optional.empty();
}
public Pong(PongPacket pongPacket) {
this.pongPacket = Optional.of(pongPacket);
}
public void addError(ErrorMessage error) {
errors.add(error);
}
public ErrorMessage getError(int i) {
return errors.get(i);
}
public int getErrorSize() {
return errors.size();
}
/** Returns the number of active documents in the backend responding in this Pong, if available */
public Optional<Long> activeDocuments() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveDocuments();
}
/** Returns the number of nodes which responded to this Pong, if available */
public List<ErrorMessage> getErrors() {
return Collections.unmodifiableList(errors);
}
/** Returns whether there is an error or not */
public boolean badResponse() {
return ! errors.isEmpty();
}
/** Sets information about the ping used to produce this. This is included when returning the tostring of this. */
public void setPingInfo(String pingInfo) {
if (pingInfo==null)
pingInfo="";
this.pingInfo=pingInfo;
}
/** Returns information about the ping use, or "" (never null) if none */
public String getPingInfo() { return pingInfo; }
public ElapsedTime getElapsedTime() {
return elapsed;
}
/** Returns a string which included the ping info (if any) and any errors added to this */
@Override
public String toString() {
StringBuilder m = new StringBuilder("Result of pinging");
if (pingInfo.length() > 0) {
m.append(" using ");
m.append(pingInfo);
}
if (errors.size() > 0)
m.append(" ");
for (int i = 0; i < errors.size(); i++) {
m.append(errors.get(i).toString());
if ( i <errors.size()-1)
m.append(", ");
}
return m.toString();
}
} |
Why not ? prelude.is not PublicApi | public Optional<Integer> activeNodes() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveNodes();
} | if ( ! pongPacket.isPresent()) return Optional.empty(); | public Optional<Integer> activeNodes() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveNodes();
} | class Pong {
private String pingInfo="";
private final List<ErrorMessage> errors = new ArrayList<>(1);
private final Optional<PongPacket> pongPacket;
private ElapsedTime elapsed = new ElapsedTime();
public Pong() {
this.pongPacket = Optional.empty();
}
public Pong(ErrorMessage error) {
errors.add(error);
this.pongPacket = Optional.empty();
}
public Pong(PongPacket pongPacket) {
this.pongPacket = Optional.of(pongPacket);
}
public void addError(ErrorMessage error) {
errors.add(error);
}
public ErrorMessage getError(int i) {
return errors.get(i);
}
public int getErrorSize() {
return errors.size();
}
/** Returns the number of active documents in the backend responding in this Pong, if available */
public Optional<Long> activeDocuments() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveDocuments();
}
/** Returns the number of nodes which responded to this Pong, if available */
public List<ErrorMessage> getErrors() {
return Collections.unmodifiableList(errors);
}
/** Returns whether there is an error or not */
public boolean badResponse() {
return ! errors.isEmpty();
}
/** Sets information about the ping used to produce this. This is included when returning the tostring of this. */
public void setPingInfo(String pingInfo) {
if (pingInfo==null)
pingInfo="";
this.pingInfo=pingInfo;
}
/** Returns information about the ping use, or "" (never null) if none */
public String getPingInfo() { return pingInfo; }
public ElapsedTime getElapsedTime() {
return elapsed;
}
/** Returns a string which included the ping info (if any) and any errors added to this */
@Override
public String toString() {
StringBuilder m = new StringBuilder("Result of pinging");
if (pingInfo.length() > 0) {
m.append(" using ");
m.append(pingInfo);
}
if (errors.size() > 0)
m.append(" ");
for (int i = 0; i < errors.size(); i++) {
m.append(errors.get(i).toString());
if ( i <errors.size()-1)
m.append(", ");
}
return m.toString();
}
} | class Pong {
private String pingInfo="";
private final List<ErrorMessage> errors = new ArrayList<>(1);
private final Optional<PongPacket> pongPacket;
private ElapsedTime elapsed = new ElapsedTime();
public Pong() {
this.pongPacket = Optional.empty();
}
public Pong(ErrorMessage error) {
errors.add(error);
this.pongPacket = Optional.empty();
}
public Pong(PongPacket pongPacket) {
this.pongPacket = Optional.of(pongPacket);
}
public void addError(ErrorMessage error) {
errors.add(error);
}
public ErrorMessage getError(int i) {
return errors.get(i);
}
public int getErrorSize() {
return errors.size();
}
/** Returns the number of active documents in the backend responding in this Pong, if available */
public Optional<Long> activeDocuments() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveDocuments();
}
/** Returns the number of nodes which responded to this Pong, if available */
public List<ErrorMessage> getErrors() {
return Collections.unmodifiableList(errors);
}
/** Returns whether there is an error or not */
public boolean badResponse() {
return ! errors.isEmpty();
}
/** Sets information about the ping used to produce this. This is included when returning the tostring of this. */
public void setPingInfo(String pingInfo) {
if (pingInfo==null)
pingInfo="";
this.pingInfo=pingInfo;
}
/** Returns information about the ping use, or "" (never null) if none */
public String getPingInfo() { return pingInfo; }
public ElapsedTime getElapsedTime() {
return elapsed;
}
/** Returns a string which included the ping info (if any) and any errors added to this */
@Override
public String toString() {
StringBuilder m = new StringBuilder("Result of pinging");
if (pingInfo.length() > 0) {
m.append(" using ");
m.append(pingInfo);
}
if (errors.size() > 0)
m.append(" ");
for (int i = 0; i < errors.size(); i++) {
m.append(errors.get(i).toString());
if ( i <errors.size()-1)
m.append(", ");
}
return m.toString();
}
} |
Hm, ok then. | public Optional<Integer> activeNodes() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveNodes();
} | if ( ! pongPacket.isPresent()) return Optional.empty(); | public Optional<Integer> activeNodes() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveNodes();
} | class Pong {
private String pingInfo="";
private final List<ErrorMessage> errors = new ArrayList<>(1);
private final Optional<PongPacket> pongPacket;
private ElapsedTime elapsed = new ElapsedTime();
public Pong() {
this.pongPacket = Optional.empty();
}
public Pong(ErrorMessage error) {
errors.add(error);
this.pongPacket = Optional.empty();
}
public Pong(PongPacket pongPacket) {
this.pongPacket = Optional.of(pongPacket);
}
public void addError(ErrorMessage error) {
errors.add(error);
}
public ErrorMessage getError(int i) {
return errors.get(i);
}
public int getErrorSize() {
return errors.size();
}
/** Returns the number of active documents in the backend responding in this Pong, if available */
public Optional<Long> activeDocuments() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveDocuments();
}
/** Returns the number of nodes which responded to this Pong, if available */
public List<ErrorMessage> getErrors() {
return Collections.unmodifiableList(errors);
}
/** Returns whether there is an error or not */
public boolean badResponse() {
return ! errors.isEmpty();
}
/** Sets information about the ping used to produce this. This is included when returning the tostring of this. */
public void setPingInfo(String pingInfo) {
if (pingInfo==null)
pingInfo="";
this.pingInfo=pingInfo;
}
/** Returns information about the ping use, or "" (never null) if none */
public String getPingInfo() { return pingInfo; }
public ElapsedTime getElapsedTime() {
return elapsed;
}
/** Returns a string which included the ping info (if any) and any errors added to this */
@Override
public String toString() {
StringBuilder m = new StringBuilder("Result of pinging");
if (pingInfo.length() > 0) {
m.append(" using ");
m.append(pingInfo);
}
if (errors.size() > 0)
m.append(" ");
for (int i = 0; i < errors.size(); i++) {
m.append(errors.get(i).toString());
if ( i <errors.size()-1)
m.append(", ");
}
return m.toString();
}
} | class Pong {
private String pingInfo="";
private final List<ErrorMessage> errors = new ArrayList<>(1);
private final Optional<PongPacket> pongPacket;
private ElapsedTime elapsed = new ElapsedTime();
public Pong() {
this.pongPacket = Optional.empty();
}
public Pong(ErrorMessage error) {
errors.add(error);
this.pongPacket = Optional.empty();
}
public Pong(PongPacket pongPacket) {
this.pongPacket = Optional.of(pongPacket);
}
public void addError(ErrorMessage error) {
errors.add(error);
}
public ErrorMessage getError(int i) {
return errors.get(i);
}
public int getErrorSize() {
return errors.size();
}
/** Returns the number of active documents in the backend responding in this Pong, if available */
public Optional<Long> activeDocuments() {
if ( ! pongPacket.isPresent()) return Optional.empty();
return pongPacket.get().getActiveDocuments();
}
/** Returns the number of nodes which responded to this Pong, if available */
public List<ErrorMessage> getErrors() {
return Collections.unmodifiableList(errors);
}
/** Returns whether there is an error or not */
public boolean badResponse() {
return ! errors.isEmpty();
}
/** Sets information about the ping used to produce this. This is included when returning the tostring of this. */
public void setPingInfo(String pingInfo) {
if (pingInfo==null)
pingInfo="";
this.pingInfo=pingInfo;
}
/** Returns information about the ping use, or "" (never null) if none */
public String getPingInfo() { return pingInfo; }
public ElapsedTime getElapsedTime() {
return elapsed;
}
/** Returns a string which included the ping info (if any) and any errors added to this */
@Override
public String toString() {
StringBuilder m = new StringBuilder("Result of pinging");
if (pingInfo.length() > 0) {
m.append(" using ");
m.append(pingInfo);
}
if (errors.size() > 0)
m.append(" ");
for (int i = 0; i < errors.size(); i++) {
m.append(errors.get(i).toString());
if ( i <errors.size()-1)
m.append(", ");
}
return m.toString();
}
} |
I think `lockedApplication` would be a better name for a locked application than `applicationId` ;) | private void updateConfirmedApplicationOwners() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.ownershipIssueId().isPresent())
.filter(application -> !application.owner().isPresent())
.forEach(application -> {
IssueId ownershipIssueId = application.ownershipIssueId().get();
ownershipIssues.getConfirmedOwner(ownershipIssueId).ifPresent(owner -> {
controller().applications().lockIfPresent(application.id(), applicationId ->
controller().applications().store(applicationId.withOwner(owner)));
});
});
} | controller().applications().lockIfPresent(application.id(), applicationId -> | private void updateConfirmedApplicationOwners() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.ownershipIssueId().isPresent())
.forEach(application -> {
IssueId ownershipIssueId = application.ownershipIssueId().get();
ownershipIssues.getConfirmedOwner(ownershipIssueId).ifPresent(owner -> {
controller().applications().lockIfPresent(application.id(), lockedApplication ->
controller().applications().store(lockedApplication.withOwner(owner)));
});
});
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
// Runs the three maintenance steps in order: file new ownership issues,
// escalate unanswered ones, then record owners confirmed through the issues.
confirmApplicationOwnerships();
ensureConfirmationResponses();
updateConfirmedApplicationOwners();
}
/**
 * File an ownership issue with the owners of all applications we know about.
 * Only applications with a project id and a production deployment, created more than
 * 90 days ago, are considered. A failure for one application is logged and does not
 * stop processing of the remaining applications.
 */
private void confirmApplicationOwnerships() {
    ApplicationList.from(controller().applications().asList())
                   .withProjectId()
                   .hasProductionDeployment()
                   .asList()
                   .stream()
                   .filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
                   .forEach(application -> {
                       try {
                           Tenant tenant = ownerOf(application.id());
                           Optional<IssueId> ourIssueId = application.ownershipIssueId();
                           // Fail with a descriptive message (logged below) instead of a bare,
                           // message-less RuntimeException when contact information is missing.
                           Contact contact = tenant.contact().orElseThrow(
                                   () -> new IllegalStateException("No contact information for " + tenant.name()));
                           // Only user tenants get a personal assignee; others are reached via contact.
                           User assignee = tenant instanceof UserTenant ? userFor(tenant) : null;
                           ourIssueId = ownershipIssues.confirmOwnership(ourIssueId, application.id(), assignee, contact);
                           ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
                       }
                       catch (RuntimeException e) {
                           log.log(Level.INFO, "Exception caught when attempting to file an issue for '" + application.id() + "': " + Exceptions.toMessageString(e));
                       }
                   });
}
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
// An escalation contact is only available for Athenz tenants; for all
// other tenant types an empty contact is passed on.
Optional<Contact> contact = Optional.of(application.id())
.map(this::ownerOf)
.filter(t -> t instanceof AthenzTenant)
.flatMap(Tenant::contact);
ownershipIssues.ensureResponse(issueId, contact);
}
catch (RuntimeException e) {
// Log and continue: one failing issue must not block escalation of the rest.
log.log(Level.INFO, "Exception caught when attempting to escalate issue with id '" + issueId + "': " + Exceptions.toMessageString(e));
}
});
}
private Tenant ownerOf(ApplicationId applicationId) {
return controller().tenants().tenant(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
protected User userFor(Tenant tenant) {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
confirmApplicationOwnerships();
ensureConfirmationResponses();
updateConfirmedApplicationOwners();
}
/** File an ownership issue with the owners of all applications we know about. */
private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
.forEach(application -> {
try {
Tenant tenant = ownerOf(application.id());
Optional<IssueId> ourIssueId = application.ownershipIssueId();
Contact contact = tenant.contact().orElseThrow(RuntimeException::new);
User asignee = tenant instanceof UserTenant ? userFor(tenant) : null;
ourIssueId = ownershipIssues.confirmOwnership(ourIssueId, application.id(), asignee, contact);
ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to file an issue for '" + application.id() + "': " + Exceptions.toMessageString(e));
}
});
}
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
Optional<Contact> contact = Optional.of(application.id())
.map(this::ownerOf)
.filter(t -> t instanceof AthenzTenant)
.flatMap(Tenant::contact);
ownershipIssues.ensureResponse(issueId, contact);
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to escalate issue with id '" + issueId + "': " + Exceptions.toMessageString(e));
}
});
}
private Tenant ownerOf(ApplicationId applicationId) {
return controller().tenants().tenant(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
protected User userFor(Tenant tenant) {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} |
This allows the owner to be set only once, i.e., to never change. I think we should allow that. | private void updateConfirmedApplicationOwners() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.ownershipIssueId().isPresent())
.filter(application -> !application.owner().isPresent())
.forEach(application -> {
IssueId ownershipIssueId = application.ownershipIssueId().get();
ownershipIssues.getConfirmedOwner(ownershipIssueId).ifPresent(owner -> {
controller().applications().lockIfPresent(application.id(), applicationId ->
controller().applications().store(applicationId.withOwner(owner)));
});
});
} | .filter(application -> !application.owner().isPresent()) | private void updateConfirmedApplicationOwners() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.ownershipIssueId().isPresent())
.forEach(application -> {
IssueId ownershipIssueId = application.ownershipIssueId().get();
ownershipIssues.getConfirmedOwner(ownershipIssueId).ifPresent(owner -> {
controller().applications().lockIfPresent(application.id(), lockedApplication ->
controller().applications().store(lockedApplication.withOwner(owner)));
});
});
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
confirmApplicationOwnerships();
ensureConfirmationResponses();
updateConfirmedApplicationOwners();
}
/** File an ownership issue with the owners of all applications we know about. */
private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
.forEach(application -> {
try {
Tenant tenant = ownerOf(application.id());
Optional<IssueId> ourIssueId = application.ownershipIssueId();
Contact contact = tenant.contact().orElseThrow(RuntimeException::new);
User asignee = tenant instanceof UserTenant ? userFor(tenant) : null;
ourIssueId = ownershipIssues.confirmOwnership(ourIssueId, application.id(), asignee, contact);
ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to file an issue for '" + application.id() + "': " + Exceptions.toMessageString(e));
}
});
}
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
Optional<Contact> contact = Optional.of(application.id())
.map(this::ownerOf)
.filter(t -> t instanceof AthenzTenant)
.flatMap(Tenant::contact);
ownershipIssues.ensureResponse(issueId, contact);
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to escalate issue with id '" + issueId + "': " + Exceptions.toMessageString(e));
}
});
}
private Tenant ownerOf(ApplicationId applicationId) {
return controller().tenants().tenant(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
protected User userFor(Tenant tenant) {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
confirmApplicationOwnerships();
ensureConfirmationResponses();
updateConfirmedApplicationOwners();
}
/** File an ownership issue with the owners of all applications we know about. */
private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
.forEach(application -> {
try {
Tenant tenant = ownerOf(application.id());
Optional<IssueId> ourIssueId = application.ownershipIssueId();
Contact contact = tenant.contact().orElseThrow(RuntimeException::new);
User asignee = tenant instanceof UserTenant ? userFor(tenant) : null;
ourIssueId = ownershipIssues.confirmOwnership(ourIssueId, application.id(), asignee, contact);
ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to file an issue for '" + application.id() + "': " + Exceptions.toMessageString(e));
}
});
}
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
Optional<Contact> contact = Optional.of(application.id())
.map(this::ownerOf)
.filter(t -> t instanceof AthenzTenant)
.flatMap(Tenant::contact);
ownershipIssues.ensureResponse(issueId, contact);
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to escalate issue with id '" + issueId + "': " + Exceptions.toMessageString(e));
}
});
}
private Tenant ownerOf(ApplicationId applicationId) {
return controller().tenants().tenant(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
protected User userFor(Tenant tenant) {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} |
Consider adding an assert for the number of dispatchers to ensure that we actually check something in the loop. | public void requireThatDispatchTuningIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>", "</search>"),
joinLines("<tuning>",
"</tuning>"));
for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) {
PartitionsConfig.Builder builder = new PartitionsConfig.Builder();
tld.getConfig(builder);
PartitionsConfig config = new PartitionsConfig(builder);
assertEquals(2, config.dataset(0).searchablecopies());
assertTrue(config.dataset(0).useroundrobinforfixedrow());
}
} | for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) { | public void requireThatDispatchTuningIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>", "</search>"),
joinLines("<tuning>",
"</tuning>"));
assertEquals(1, cluster.getSearch().getIndexed().getTLDs().size());
for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) {
PartitionsConfig.Builder builder = new PartitionsConfig.Builder();
tld.getConfig(builder);
PartitionsConfig config = new PartitionsConfig(builder);
assertEquals(2, config.dataset(0).searchablecopies());
assertTrue(config.dataset(0).useroundrobinforfixedrow());
}
} | class ClusterTest {
@Test
public void requireThatContentSearchIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <query-timeout>1.1</query-timeout>",
" <visibility-delay>2.3</visibility-delay>",
"</search>"));
IndexedSearchCluster searchCluster = cluster.getSearch().getIndexed();
assertNotNull(searchCluster);
assertEquals(1.1, searchCluster.getQueryTimeout(), 1E-6);
assertEquals(2.3, searchCluster.getVisibilityDelay(), 1E-6);
ProtonConfig proton = getProtonConfig(cluster);
assertEquals(searchCluster.getVisibilityDelay(), proton.documentdb(0).visibilitydelay(), 1E-6);
}
@Test
public void requireThatSearchCoverageIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <coverage>",
" <minimum>0.11</minimum>",
" <min-wait-after-coverage-factor>0.23</min-wait-after-coverage-factor>",
" <max-wait-after-coverage-factor>0.58</max-wait-after-coverage-factor>",
" </coverage>",
"</search>"));
for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) {
PartitionsConfig.Builder builder = new PartitionsConfig.Builder();
tld.getConfig(builder);
PartitionsConfig config = new PartitionsConfig(builder);
assertEquals(11.0, config.dataset(0).minimal_searchcoverage(), 1E-6);
assertEquals(0.23, config.dataset(0).higher_coverage_minsearchwait(), 1E-6);
assertEquals(0.58, config.dataset(0).higher_coverage_maxsearchwait(), 1E-6);
assertEquals(2, config.dataset(0).searchablecopies());
assertTrue(config.dataset(0).useroundrobinforfixedrow());
}
}
@Test
@Test
public void requireThatVisibilityDelayIsZeroForGlobalDocumentType() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <visibility-delay>2.3</visibility-delay>",
"</search>"), true);
ProtonConfig proton = getProtonConfig(cluster);
assertEquals(0.0, proton.documentdb(0).visibilitydelay(), 1E-6);
}
private static ContentCluster newContentCluster(String contentSearchXml) throws ParseException {
return newContentCluster(contentSearchXml, "", false);
}
private static ContentCluster newContentCluster(String contentSearchXml, String searchNodeTuningXml) throws ParseException {
return newContentCluster(contentSearchXml, searchNodeTuningXml, false);
}
private static ContentCluster newContentCluster(String contentSearchXml, boolean globalDocType) throws ParseException {
return newContentCluster(contentSearchXml, "", globalDocType);
}
private static ContentCluster newContentCluster(String contentSearchXml, String searchNodeTuningXml, boolean globalDocType) throws ParseException {
ApplicationPackage app = new MockApplicationPackage.Builder()
.withHosts(joinLines("<hosts>",
" <host name='localhost'><alias>my_host</alias></host>",
"</hosts>"))
.withServices(joinLines("<services version='1.0'>",
" <admin version='2.0'>",
" <adminserver hostalias='my_host' />",
" </admin>",
" <content version='1.0'>",
" <redundancy>3</redundancy>",
" <documents>",
" " + getDocumentXml(globalDocType),
" </documents>",
" <engine>",
" <proton>",
" <searchable-copies>2</searchable-copies>",
searchNodeTuningXml,
" </proton>",
" </engine>",
" <group>",
" <node hostalias='my_host' distribution-key='0' />",
" </group>",
contentSearchXml,
" </content>",
"</services>"))
.withSearchDefinitions(ApplicationPackageUtils.generateSearchDefinition("my_document"))
.build();
List<Content> contents = new TestDriver().buildModel(app).getConfigModels(Content.class);
assertEquals(1, contents.size());
return contents.get(0).getCluster();
}
private static String getDocumentXml(boolean globalDocType) {
return "<document mode='index' type='my_document' " + (globalDocType ? "global='true' " : "") + "/>";
}
private static SearchDefinition newSearchDefinition(String name) throws ParseException {
SearchBuilder builder = new SearchBuilder();
builder.importString("search " + name + " { document " + name + " { } }");
builder.build();
return new SearchDefinition(name, builder.getSearch(name));
}
private static ProtonConfig getProtonConfig(ContentCluster cluster) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
cluster.getSearch().getConfig(builder);
return new ProtonConfig(builder);
}
} | class ClusterTest {
@Test
public void requireThatContentSearchIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <query-timeout>1.1</query-timeout>",
" <visibility-delay>2.3</visibility-delay>",
"</search>"));
IndexedSearchCluster searchCluster = cluster.getSearch().getIndexed();
assertNotNull(searchCluster);
assertEquals(1.1, searchCluster.getQueryTimeout(), 1E-6);
assertEquals(2.3, searchCluster.getVisibilityDelay(), 1E-6);
ProtonConfig proton = getProtonConfig(cluster);
assertEquals(searchCluster.getVisibilityDelay(), proton.documentdb(0).visibilitydelay(), 1E-6);
}
@Test
public void requireThatSearchCoverageIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <coverage>",
" <minimum>0.11</minimum>",
" <min-wait-after-coverage-factor>0.23</min-wait-after-coverage-factor>",
" <max-wait-after-coverage-factor>0.58</max-wait-after-coverage-factor>",
" </coverage>",
"</search>"));
assertEquals(1, cluster.getSearch().getIndexed().getTLDs().size());
for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) {
PartitionsConfig.Builder builder = new PartitionsConfig.Builder();
tld.getConfig(builder);
PartitionsConfig config = new PartitionsConfig(builder);
assertEquals(11.0, config.dataset(0).minimal_searchcoverage(), 1E-6);
assertEquals(0.23, config.dataset(0).higher_coverage_minsearchwait(), 1E-6);
assertEquals(0.58, config.dataset(0).higher_coverage_maxsearchwait(), 1E-6);
assertEquals(2, config.dataset(0).searchablecopies());
assertTrue(config.dataset(0).useroundrobinforfixedrow());
}
}
@Test
@Test
public void requireThatVisibilityDelayIsZeroForGlobalDocumentType() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <visibility-delay>2.3</visibility-delay>",
"</search>"), true);
ProtonConfig proton = getProtonConfig(cluster);
assertEquals(0.0, proton.documentdb(0).visibilitydelay(), 1E-6);
}
private static ContentCluster newContentCluster(String contentSearchXml) throws ParseException {
return newContentCluster(contentSearchXml, "", false);
}
private static ContentCluster newContentCluster(String contentSearchXml, String searchNodeTuningXml) throws ParseException {
return newContentCluster(contentSearchXml, searchNodeTuningXml, false);
}
private static ContentCluster newContentCluster(String contentSearchXml, boolean globalDocType) throws ParseException {
return newContentCluster(contentSearchXml, "", globalDocType);
}
private static ContentCluster newContentCluster(String contentSearchXml, String searchNodeTuningXml, boolean globalDocType) throws ParseException {
ApplicationPackage app = new MockApplicationPackage.Builder()
.withHosts(joinLines(
"<hosts>",
" <host name='localhost'><alias>my_host</alias></host>",
"</hosts>"))
.withServices(joinLines(
"<services version='1.0'>",
" <admin version='2.0'>",
" <adminserver hostalias='my_host' />",
" </admin>",
"<jdisc id='foo' version='1.0'>",
" <search />",
" <nodes><node hostalias='my_host' /></nodes>",
"</jdisc>",
" <content version='1.0'>",
" <redundancy>3</redundancy>",
" <documents>",
" " + getDocumentXml(globalDocType),
" </documents>",
" <engine>",
" <proton>",
" <searchable-copies>2</searchable-copies>",
searchNodeTuningXml,
" </proton>",
" </engine>",
" <group>",
" <node hostalias='my_host' distribution-key='0' />",
" </group>",
contentSearchXml,
" </content>",
"</services>"))
.withSearchDefinitions(ApplicationPackageUtils.generateSearchDefinition("my_document"))
.build();
List<Content> contents = new TestDriver().buildModel(app).getConfigModels(Content.class);
assertEquals(1, contents.size());
return contents.get(0).getCluster();
}
private static String getDocumentXml(boolean globalDocType) {
return "<document mode='index' type='my_document' " + (globalDocType ? "global='true' " : "") + "/>";
}
private static SearchDefinition newSearchDefinition(String name) throws ParseException {
SearchBuilder builder = new SearchBuilder();
builder.importString("search " + name + " { document " + name + " { } }");
builder.build();
return new SearchDefinition(name, builder.getSearch(name));
}
private static ProtonConfig getProtonConfig(ContentCluster cluster) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
cluster.getSearch().getConfig(builder);
return new ProtonConfig(builder);
}
} |
Good point. It turned out that a few tests did not test anything. | public void requireThatDispatchTuningIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>", "</search>"),
joinLines("<tuning>",
"</tuning>"));
for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) {
PartitionsConfig.Builder builder = new PartitionsConfig.Builder();
tld.getConfig(builder);
PartitionsConfig config = new PartitionsConfig(builder);
assertEquals(2, config.dataset(0).searchablecopies());
assertTrue(config.dataset(0).useroundrobinforfixedrow());
}
} | for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) { | public void requireThatDispatchTuningIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>", "</search>"),
joinLines("<tuning>",
"</tuning>"));
assertEquals(1, cluster.getSearch().getIndexed().getTLDs().size());
for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) {
PartitionsConfig.Builder builder = new PartitionsConfig.Builder();
tld.getConfig(builder);
PartitionsConfig config = new PartitionsConfig(builder);
assertEquals(2, config.dataset(0).searchablecopies());
assertTrue(config.dataset(0).useroundrobinforfixedrow());
}
} | class ClusterTest {
@Test
public void requireThatContentSearchIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <query-timeout>1.1</query-timeout>",
" <visibility-delay>2.3</visibility-delay>",
"</search>"));
IndexedSearchCluster searchCluster = cluster.getSearch().getIndexed();
assertNotNull(searchCluster);
assertEquals(1.1, searchCluster.getQueryTimeout(), 1E-6);
assertEquals(2.3, searchCluster.getVisibilityDelay(), 1E-6);
ProtonConfig proton = getProtonConfig(cluster);
assertEquals(searchCluster.getVisibilityDelay(), proton.documentdb(0).visibilitydelay(), 1E-6);
}
@Test
public void requireThatSearchCoverageIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <coverage>",
" <minimum>0.11</minimum>",
" <min-wait-after-coverage-factor>0.23</min-wait-after-coverage-factor>",
" <max-wait-after-coverage-factor>0.58</max-wait-after-coverage-factor>",
" </coverage>",
"</search>"));
for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) {
PartitionsConfig.Builder builder = new PartitionsConfig.Builder();
tld.getConfig(builder);
PartitionsConfig config = new PartitionsConfig(builder);
assertEquals(11.0, config.dataset(0).minimal_searchcoverage(), 1E-6);
assertEquals(0.23, config.dataset(0).higher_coverage_minsearchwait(), 1E-6);
assertEquals(0.58, config.dataset(0).higher_coverage_maxsearchwait(), 1E-6);
assertEquals(2, config.dataset(0).searchablecopies());
assertTrue(config.dataset(0).useroundrobinforfixedrow());
}
}
@Test
@Test
public void requireThatVisibilityDelayIsZeroForGlobalDocumentType() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <visibility-delay>2.3</visibility-delay>",
"</search>"), true);
ProtonConfig proton = getProtonConfig(cluster);
assertEquals(0.0, proton.documentdb(0).visibilitydelay(), 1E-6);
}
private static ContentCluster newContentCluster(String contentSearchXml) throws ParseException {
return newContentCluster(contentSearchXml, "", false);
}
private static ContentCluster newContentCluster(String contentSearchXml, String searchNodeTuningXml) throws ParseException {
return newContentCluster(contentSearchXml, searchNodeTuningXml, false);
}
private static ContentCluster newContentCluster(String contentSearchXml, boolean globalDocType) throws ParseException {
return newContentCluster(contentSearchXml, "", globalDocType);
}
private static ContentCluster newContentCluster(String contentSearchXml, String searchNodeTuningXml, boolean globalDocType) throws ParseException {
ApplicationPackage app = new MockApplicationPackage.Builder()
.withHosts(joinLines("<hosts>",
" <host name='localhost'><alias>my_host</alias></host>",
"</hosts>"))
.withServices(joinLines("<services version='1.0'>",
" <admin version='2.0'>",
" <adminserver hostalias='my_host' />",
" </admin>",
" <content version='1.0'>",
" <redundancy>3</redundancy>",
" <documents>",
" " + getDocumentXml(globalDocType),
" </documents>",
" <engine>",
" <proton>",
" <searchable-copies>2</searchable-copies>",
searchNodeTuningXml,
" </proton>",
" </engine>",
" <group>",
" <node hostalias='my_host' distribution-key='0' />",
" </group>",
contentSearchXml,
" </content>",
"</services>"))
.withSearchDefinitions(ApplicationPackageUtils.generateSearchDefinition("my_document"))
.build();
List<Content> contents = new TestDriver().buildModel(app).getConfigModels(Content.class);
assertEquals(1, contents.size());
return contents.get(0).getCluster();
}
private static String getDocumentXml(boolean globalDocType) {
return "<document mode='index' type='my_document' " + (globalDocType ? "global='true' " : "") + "/>";
}
private static SearchDefinition newSearchDefinition(String name) throws ParseException {
SearchBuilder builder = new SearchBuilder();
builder.importString("search " + name + " { document " + name + " { } }");
builder.build();
return new SearchDefinition(name, builder.getSearch(name));
}
private static ProtonConfig getProtonConfig(ContentCluster cluster) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
cluster.getSearch().getConfig(builder);
return new ProtonConfig(builder);
}
} | class ClusterTest {
@Test
public void requireThatContentSearchIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <query-timeout>1.1</query-timeout>",
" <visibility-delay>2.3</visibility-delay>",
"</search>"));
IndexedSearchCluster searchCluster = cluster.getSearch().getIndexed();
assertNotNull(searchCluster);
assertEquals(1.1, searchCluster.getQueryTimeout(), 1E-6);
assertEquals(2.3, searchCluster.getVisibilityDelay(), 1E-6);
ProtonConfig proton = getProtonConfig(cluster);
assertEquals(searchCluster.getVisibilityDelay(), proton.documentdb(0).visibilitydelay(), 1E-6);
}
@Test
public void requireThatSearchCoverageIsApplied() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <coverage>",
" <minimum>0.11</minimum>",
" <min-wait-after-coverage-factor>0.23</min-wait-after-coverage-factor>",
" <max-wait-after-coverage-factor>0.58</max-wait-after-coverage-factor>",
" </coverage>",
"</search>"));
assertEquals(1, cluster.getSearch().getIndexed().getTLDs().size());
for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) {
PartitionsConfig.Builder builder = new PartitionsConfig.Builder();
tld.getConfig(builder);
PartitionsConfig config = new PartitionsConfig(builder);
assertEquals(11.0, config.dataset(0).minimal_searchcoverage(), 1E-6);
assertEquals(0.23, config.dataset(0).higher_coverage_minsearchwait(), 1E-6);
assertEquals(0.58, config.dataset(0).higher_coverage_maxsearchwait(), 1E-6);
assertEquals(2, config.dataset(0).searchablecopies());
assertTrue(config.dataset(0).useroundrobinforfixedrow());
}
}
@Test
@Test
public void requireThatVisibilityDelayIsZeroForGlobalDocumentType() throws ParseException {
ContentCluster cluster = newContentCluster(joinLines("<search>",
" <visibility-delay>2.3</visibility-delay>",
"</search>"), true);
ProtonConfig proton = getProtonConfig(cluster);
assertEquals(0.0, proton.documentdb(0).visibilitydelay(), 1E-6);
}
private static ContentCluster newContentCluster(String contentSearchXml) throws ParseException {
return newContentCluster(contentSearchXml, "", false);
}
private static ContentCluster newContentCluster(String contentSearchXml, String searchNodeTuningXml) throws ParseException {
return newContentCluster(contentSearchXml, searchNodeTuningXml, false);
}
private static ContentCluster newContentCluster(String contentSearchXml, boolean globalDocType) throws ParseException {
return newContentCluster(contentSearchXml, "", globalDocType);
}
private static ContentCluster newContentCluster(String contentSearchXml, String searchNodeTuningXml, boolean globalDocType) throws ParseException {
ApplicationPackage app = new MockApplicationPackage.Builder()
.withHosts(joinLines(
"<hosts>",
" <host name='localhost'><alias>my_host</alias></host>",
"</hosts>"))
.withServices(joinLines(
"<services version='1.0'>",
" <admin version='2.0'>",
" <adminserver hostalias='my_host' />",
" </admin>",
"<jdisc id='foo' version='1.0'>",
" <search />",
" <nodes><node hostalias='my_host' /></nodes>",
"</jdisc>",
" <content version='1.0'>",
" <redundancy>3</redundancy>",
" <documents>",
" " + getDocumentXml(globalDocType),
" </documents>",
" <engine>",
" <proton>",
" <searchable-copies>2</searchable-copies>",
searchNodeTuningXml,
" </proton>",
" </engine>",
" <group>",
" <node hostalias='my_host' distribution-key='0' />",
" </group>",
contentSearchXml,
" </content>",
"</services>"))
.withSearchDefinitions(ApplicationPackageUtils.generateSearchDefinition("my_document"))
.build();
List<Content> contents = new TestDriver().buildModel(app).getConfigModels(Content.class);
assertEquals(1, contents.size());
return contents.get(0).getCluster();
}
private static String getDocumentXml(boolean globalDocType) {
return "<document mode='index' type='my_document' " + (globalDocType ? "global='true' " : "") + "/>";
}
private static SearchDefinition newSearchDefinition(String name) throws ParseException {
SearchBuilder builder = new SearchBuilder();
builder.importString("search " + name + " { document " + name + " { } }");
builder.build();
return new SearchDefinition(name, builder.getSearch(name));
}
private static ProtonConfig getProtonConfig(ContentCluster cluster) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
cluster.getSearch().getConfig(builder);
return new ProtonConfig(builder);
}
} |
`UncheckedIOException`? | public Optional<byte[]> getBytes(FlagId id) {
try {
return Optional.of(Files.readAllBytes(getPath(id)));
} catch (NoSuchFileException e) {
return Optional.empty();
} catch (IOException e) {
throw new UncheckedTimeoutException(e);
}
} | throw new UncheckedTimeoutException(e); | public Optional<byte[]> getBytes(FlagId id) {
try {
return Optional.of(Files.readAllBytes(getPath(id)));
} catch (NoSuchFileException e) {
return Optional.empty();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} | class FileFlagSource implements FlagSource {
static final String FLAGS_DIRECTORY = "/etc/vespa/flags";
private final Path flagsDirectory;
@Inject
public FileFlagSource() {
this(FileSystems.getDefault());
}
public FileFlagSource(FileSystem fileSystem) {
this(fileSystem.getPath(FLAGS_DIRECTORY));
}
public FileFlagSource(Path flagsDirectory) {
this.flagsDirectory = flagsDirectory;
}
@Override
public boolean hasFeature(FlagId id) {
return Files.exists(getPath(id));
}
@Override
public Optional<String> getString(FlagId id) {
return getBytes(id).map(bytes -> new String(bytes, StandardCharsets.UTF_8));
}
private Path getPath(FlagId id) {
return flagsDirectory.resolve(id.toString());
}
@Override
public String toString() {
return "FileFlagSource{" + flagsDirectory + '}';
}
} | class FileFlagSource implements FlagSource {
static final String FLAGS_DIRECTORY = "/etc/vespa/flags";
private final Path flagsDirectory;
@Inject
public FileFlagSource() {
this(FileSystems.getDefault());
}
public FileFlagSource(FileSystem fileSystem) {
this(fileSystem.getPath(FLAGS_DIRECTORY));
}
public FileFlagSource(Path flagsDirectory) {
this.flagsDirectory = flagsDirectory;
}
@Override
public Optional<String> getString(FlagId id) {
return getBytes(id).map(bytes -> new String(bytes, StandardCharsets.UTF_8));
}
private Path getPath(FlagId id) {
return flagsDirectory.resolve(id.toString());
}
@Override
public String toString() {
return "FileFlagSource{" + flagsDirectory + '}';
}
} |
Nice catch, fixed. | public Optional<byte[]> getBytes(FlagId id) {
try {
return Optional.of(Files.readAllBytes(getPath(id)));
} catch (NoSuchFileException e) {
return Optional.empty();
} catch (IOException e) {
throw new UncheckedTimeoutException(e);
}
} | throw new UncheckedTimeoutException(e); | public Optional<byte[]> getBytes(FlagId id) {
try {
return Optional.of(Files.readAllBytes(getPath(id)));
} catch (NoSuchFileException e) {
return Optional.empty();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} | class FileFlagSource implements FlagSource {
static final String FLAGS_DIRECTORY = "/etc/vespa/flags";
private final Path flagsDirectory;
@Inject
public FileFlagSource() {
this(FileSystems.getDefault());
}
public FileFlagSource(FileSystem fileSystem) {
this(fileSystem.getPath(FLAGS_DIRECTORY));
}
public FileFlagSource(Path flagsDirectory) {
this.flagsDirectory = flagsDirectory;
}
@Override
public boolean hasFeature(FlagId id) {
return Files.exists(getPath(id));
}
@Override
public Optional<String> getString(FlagId id) {
return getBytes(id).map(bytes -> new String(bytes, StandardCharsets.UTF_8));
}
private Path getPath(FlagId id) {
return flagsDirectory.resolve(id.toString());
}
@Override
public String toString() {
return "FileFlagSource{" + flagsDirectory + '}';
}
} | class FileFlagSource implements FlagSource {
static final String FLAGS_DIRECTORY = "/etc/vespa/flags";
private final Path flagsDirectory;
@Inject
public FileFlagSource() {
this(FileSystems.getDefault());
}
public FileFlagSource(FileSystem fileSystem) {
this(fileSystem.getPath(FLAGS_DIRECTORY));
}
public FileFlagSource(Path flagsDirectory) {
this.flagsDirectory = flagsDirectory;
}
@Override
public Optional<String> getString(FlagId id) {
return getBytes(id).map(bytes -> new String(bytes, StandardCharsets.UTF_8));
}
private Path getPath(FlagId id) {
return flagsDirectory.resolve(id.toString());
}
@Override
public String toString() {
return "FileFlagSource{" + flagsDirectory + '}';
}
} |
In the face of multiple CNs, RFC 6125 states that the "most specific" CN should be used. Going by e.g. [Boost.Asio's RFC2818 implementation](https://github.com/boostorg/asio/blob/develop/include/boost/asio/ssl/impl/rfc2818_verification.ipp#L104) this looks to be the _last_ entry in the certificate. ... Assuming this method returns CNs in original DER-encoded order. I.e. since Distinguished Name printing _reverses_ the order of the fields in DER form, from that perspective the most specific entry is the _first_ one. This is a bit of a rat's nest of diffuse definitions 😬 | private static Optional<String> getCommonName(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectCommonNames(peerCertificate).stream()
.findFirst();
} | .findFirst(); | private static Optional<String> getCommonName(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectCommonNames(peerCertificate).stream()
.findFirst();
} | class PeerAuthorizer {
private final AuthorizedPeers authorizedPeers;
public PeerAuthorizer(AuthorizedPeers authorizedPeers) {
this.authorizedPeers = authorizedPeers;
}
public AuthorizationResult authorizePeer(X509Certificate peerCertificate) {
Set<Role> assumedRoles = new HashSet<>();
Set<String> matchedPolicies = new HashSet<>();
String cn = getCommonName(peerCertificate).orElse(null);
List<String> sans = getSubjectAlternativeNames(peerCertificate);
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
if (matchesPolicy(peerPolicy, cn, sans)) {
assumedRoles.addAll(peerPolicy.assumedRoles());
matchedPolicies.add(peerPolicy.policyName());
}
}
return new AuthorizationResult(assumedRoles, matchedPolicies);
}
private static boolean matchesPolicy(PeerPolicy peerPolicy, String cn, List<String> sans) {
return peerPolicy.requiredCredentials().stream()
.allMatch(requiredCredential -> matchesRequiredCredentials(requiredCredential, cn, sans));
}
private static boolean matchesRequiredCredentials(RequiredPeerCredential requiredCredential, String cn, List<String> sans) {
switch (requiredCredential.field()) {
case CN:
return cn != null && requiredCredential.pattern().matches(cn);
case SAN_DNS:
return sans.stream()
.anyMatch(san -> requiredCredential.pattern().matches(san));
default:
throw new RuntimeException("Unknown field: " + requiredCredential.field());
}
}
private static List<String> getSubjectAlternativeNames(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream()
.filter(san -> san.getType() == DNS_NAME || san.getType() == IP_ADDRESS)
.map(SubjectAlternativeName::getValue)
.collect(toList());
}
} | class PeerAuthorizer {
private final AuthorizedPeers authorizedPeers;
public PeerAuthorizer(AuthorizedPeers authorizedPeers) {
this.authorizedPeers = authorizedPeers;
}
public AuthorizationResult authorizePeer(X509Certificate peerCertificate) {
Set<Role> assumedRoles = new HashSet<>();
Set<String> matchedPolicies = new HashSet<>();
String cn = getCommonName(peerCertificate).orElse(null);
List<String> sans = getSubjectAlternativeNames(peerCertificate);
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
if (matchesPolicy(peerPolicy, cn, sans)) {
assumedRoles.addAll(peerPolicy.assumedRoles());
matchedPolicies.add(peerPolicy.policyName());
}
}
return new AuthorizationResult(assumedRoles, matchedPolicies);
}
private static boolean matchesPolicy(PeerPolicy peerPolicy, String cn, List<String> sans) {
return peerPolicy.requiredCredentials().stream()
.allMatch(requiredCredential -> matchesRequiredCredentials(requiredCredential, cn, sans));
}
private static boolean matchesRequiredCredentials(RequiredPeerCredential requiredCredential, String cn, List<String> sans) {
switch (requiredCredential.field()) {
case CN:
return cn != null && requiredCredential.pattern().matches(cn);
case SAN_DNS:
return sans.stream()
.anyMatch(san -> requiredCredential.pattern().matches(san));
default:
throw new RuntimeException("Unknown field: " + requiredCredential.field());
}
}
private static List<String> getSubjectAlternativeNames(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream()
.filter(san -> san.getType() == DNS_NAME || san.getType() == IP_ADDRESS)
.map(SubjectAlternativeName::getValue)
.collect(toList());
}
} |
Checking the sets themselves would be nice, e.g. with a `containsInAnyOrder` matcher to make things more readable | public void can_match_multiple_policies() {
RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn");
RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san");
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy("policy-1", createRoles("role-1", "role-2"), cnRequirement, sanRequirement),
createPolicy("policy-2", createRoles("role-2", "role-3"), cnRequirement, sanRequirement));
AuthorizationResult result = peerAuthorizer
.authorizePeer(createCertificate("foo.matching.cn", "foo.matching.san"));
assertAuthorized(result);
assertEquals(3, result.assumedRoles().size());
assertEquals(2, result.matchedPolicies().size());
} | assertEquals(2, result.matchedPolicies().size()); | public void can_match_multiple_policies() {
RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn");
RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san");
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy(POLICY_1, createRoles(ROLE_1, ROLE_2), cnRequirement, sanRequirement),
createPolicy(POLICY_2, createRoles(ROLE_2, ROLE_3), cnRequirement, sanRequirement));
AuthorizationResult result = peerAuthorizer
.authorizePeer(createCertificate("foo.matching.cn", "foo.matching.san"));
assertAuthorized(result);
assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1, ROLE_2, ROLE_3);
assertThat(result.matchedPolicies()).containsOnly(POLICY_1, POLICY_2);
} | class PeerAuthorizerTest {
private static final KeyPair KEY_PAIR = KeyUtils.generateKeypair(KeyAlgorithm.EC);
@Test
public void certificate_must_match_both_san_and_cn_pattern() {
String roleName = "role";
String policyName = "policy";
RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn");
RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san");
PeerAuthorizer authorizer = createPeerAuthorizer(createPolicy(policyName, createRoles(roleName), cnRequirement, sanRequirement));
AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", "foo.matching.san", "foo.invalid.san"));
assertAuthorized(result);
assertEquals(1, result.assumedRoles().size());
assertEquals(roleName, result.assumedRoles().iterator().next().name());
assertEquals(1, result.matchedPolicies().size());
assertEquals(policyName, result.matchedPolicies().iterator().next());
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san")));
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san", "foo.invalid.san")));
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.matching.cn", "foo.invalid.san")));
}
@Test
@Test
public void can_match_subset_of_policies() {
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy("policy-1", createRoles("role-1"), createRequiredCredential(CN, "*.matching.cn")),
createPolicy("policy-2", createRoles("role-1", "role-2"), createRequiredCredential(SAN_DNS, "*.matching.san")));
AuthorizationResult result = peerAuthorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san"));
assertAuthorized(result);
assertEquals(2, result.assumedRoles().size());
assertEquals(1, result.matchedPolicies().size());
}
@Test
public void must_match_all_cn_and_san_patterns() {
RequiredPeerCredential cnSuffixRequirement = createRequiredCredential(CN, "*.*.matching.suffix.cn");
RequiredPeerCredential cnPrefixRequirement = createRequiredCredential(CN, "matching.prefix.*.*.*");
RequiredPeerCredential sanPrefixRequirement = createRequiredCredential(SAN_DNS, "*.*.matching.suffix.san");
RequiredPeerCredential sanSuffixRequirement = createRequiredCredential(SAN_DNS, "matching.prefix.*.*.*");
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy("policy", emptySet(), cnSuffixRequirement, cnPrefixRequirement, sanPrefixRequirement, sanSuffixRequirement));
assertAuthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", "matching.prefix.matching.suffix.san")));
assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", "matching.prefix.invalid.suffix.san")));
assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("invalid.prefix.matching.suffix.cn", "matching.prefix.matching.suffix.san")));
}
private static X509Certificate createCertificate(String subjectCn, String... sanCns) {
X509CertificateBuilder builder =
X509CertificateBuilder.fromKeypair(
KEY_PAIR,
new X500Principal("CN=" + subjectCn),
Instant.EPOCH,
Instant.EPOCH.plus(100000, ChronoUnit.DAYS),
SHA256_WITH_ECDSA,
BigInteger.ONE);
for (String sanCn : sanCns) {
builder.addSubjectAlternativeName(sanCn);
}
return builder.build();
}
private static RequiredPeerCredential createRequiredCredential(Field field, String pattern) {
return new RequiredPeerCredential(field, new HostGlobPattern(pattern));
}
private static Set<Role> createRoles(String... roleNames) {
return Arrays.stream(roleNames).map(Role::new).collect(toSet());
}
private static PeerAuthorizer createPeerAuthorizer(PeerPolicy... policies) {
return new PeerAuthorizer(new AuthorizedPeers(Arrays.stream(policies).collect(toSet())));
}
private static PeerPolicy createPolicy(String name, Set<Role> roles, RequiredPeerCredential... requiredCredentials) {
return new PeerPolicy(name, roles, Arrays.asList(requiredCredentials));
}
private static void assertAuthorized(AuthorizationResult result) {
assertTrue(result.succeeded());
}
private static void assertUnauthorized(AuthorizationResult result) {
assertFalse(result.succeeded());
}
} | class PeerAuthorizerTest {
private static final KeyPair KEY_PAIR = KeyUtils.generateKeypair(KeyAlgorithm.EC);
private static final String ROLE_1 = "role-1", ROLE_2 = "role-2", ROLE_3 = "role-3", POLICY_1 = "policy-1", POLICY_2 = "policy-2";
@Test
public void certificate_must_match_both_san_and_cn_pattern() {
RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn");
RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san");
PeerAuthorizer authorizer = createPeerAuthorizer(createPolicy(POLICY_1, createRoles(ROLE_1), cnRequirement, sanRequirement));
AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", "foo.matching.san", "foo.invalid.san"));
assertAuthorized(result);
assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1);
assertThat(result.matchedPolicies()).containsOnly(POLICY_1);
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san")));
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san", "foo.invalid.san")));
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.matching.cn", "foo.invalid.san")));
}
@Test
@Test
public void can_match_subset_of_policies() {
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy(POLICY_1, createRoles(ROLE_1), createRequiredCredential(CN, "*.matching.cn")),
createPolicy(POLICY_2, createRoles(ROLE_1, ROLE_2), createRequiredCredential(SAN_DNS, "*.matching.san")));
AuthorizationResult result = peerAuthorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san"));
assertAuthorized(result);
assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1, ROLE_2);
assertThat(result.matchedPolicies()).containsOnly(POLICY_2);
}
@Test
public void must_match_all_cn_and_san_patterns() {
RequiredPeerCredential cnSuffixRequirement = createRequiredCredential(CN, "*.*.matching.suffix.cn");
RequiredPeerCredential cnPrefixRequirement = createRequiredCredential(CN, "matching.prefix.*.*.*");
RequiredPeerCredential sanPrefixRequirement = createRequiredCredential(SAN_DNS, "*.*.matching.suffix.san");
RequiredPeerCredential sanSuffixRequirement = createRequiredCredential(SAN_DNS, "matching.prefix.*.*.*");
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy(POLICY_1, emptySet(), cnSuffixRequirement, cnPrefixRequirement, sanPrefixRequirement, sanSuffixRequirement));
assertAuthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", "matching.prefix.matching.suffix.san")));
assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", "matching.prefix.invalid.suffix.san")));
assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("invalid.prefix.matching.suffix.cn", "matching.prefix.matching.suffix.san")));
}
private static X509Certificate createCertificate(String subjectCn, String... sanCns) {
X509CertificateBuilder builder =
X509CertificateBuilder.fromKeypair(
KEY_PAIR,
new X500Principal("CN=" + subjectCn),
Instant.EPOCH,
Instant.EPOCH.plus(100000, ChronoUnit.DAYS),
SHA256_WITH_ECDSA,
BigInteger.ONE);
for (String sanCn : sanCns) {
builder.addSubjectAlternativeName(sanCn);
}
return builder.build();
}
private static RequiredPeerCredential createRequiredCredential(Field field, String pattern) {
return new RequiredPeerCredential(field, new HostGlobPattern(pattern));
}
private static Set<Role> createRoles(String... roleNames) {
return Arrays.stream(roleNames).map(Role::new).collect(toSet());
}
private static PeerAuthorizer createPeerAuthorizer(PeerPolicy... policies) {
return new PeerAuthorizer(new AuthorizedPeers(Arrays.stream(policies).collect(toSet())));
}
private static PeerPolicy createPolicy(String name, Set<Role> roles, RequiredPeerCredential... requiredCredentials) {
return new PeerPolicy(name, roles, Arrays.asList(requiredCredentials));
}
private static void assertAuthorized(AuthorizationResult result) {
assertTrue(result.succeeded());
}
private static void assertUnauthorized(AuthorizationResult result) {
assertFalse(result.succeeded());
}
} |
Same here | public void can_match_subset_of_policies() {
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy("policy-1", createRoles("role-1"), createRequiredCredential(CN, "*.matching.cn")),
createPolicy("policy-2", createRoles("role-1", "role-2"), createRequiredCredential(SAN_DNS, "*.matching.san")));
AuthorizationResult result = peerAuthorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san"));
assertAuthorized(result);
assertEquals(2, result.assumedRoles().size());
assertEquals(1, result.matchedPolicies().size());
} | assertEquals(1, result.matchedPolicies().size()); | public void can_match_subset_of_policies() {
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy(POLICY_1, createRoles(ROLE_1), createRequiredCredential(CN, "*.matching.cn")),
createPolicy(POLICY_2, createRoles(ROLE_1, ROLE_2), createRequiredCredential(SAN_DNS, "*.matching.san")));
AuthorizationResult result = peerAuthorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san"));
assertAuthorized(result);
assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1, ROLE_2);
assertThat(result.matchedPolicies()).containsOnly(POLICY_2);
} | class PeerAuthorizerTest {
private static final KeyPair KEY_PAIR = KeyUtils.generateKeypair(KeyAlgorithm.EC);
@Test
public void certificate_must_match_both_san_and_cn_pattern() {
String roleName = "role";
String policyName = "policy";
RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn");
RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san");
PeerAuthorizer authorizer = createPeerAuthorizer(createPolicy(policyName, createRoles(roleName), cnRequirement, sanRequirement));
AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", "foo.matching.san", "foo.invalid.san"));
assertAuthorized(result);
assertEquals(1, result.assumedRoles().size());
assertEquals(roleName, result.assumedRoles().iterator().next().name());
assertEquals(1, result.matchedPolicies().size());
assertEquals(policyName, result.matchedPolicies().iterator().next());
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san")));
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san", "foo.invalid.san")));
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.matching.cn", "foo.invalid.san")));
}
@Test
public void can_match_multiple_policies() {
RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn");
RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san");
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy("policy-1", createRoles("role-1", "role-2"), cnRequirement, sanRequirement),
createPolicy("policy-2", createRoles("role-2", "role-3"), cnRequirement, sanRequirement));
AuthorizationResult result = peerAuthorizer
.authorizePeer(createCertificate("foo.matching.cn", "foo.matching.san"));
assertAuthorized(result);
assertEquals(3, result.assumedRoles().size());
assertEquals(2, result.matchedPolicies().size());
}
@Test
@Test
public void must_match_all_cn_and_san_patterns() {
RequiredPeerCredential cnSuffixRequirement = createRequiredCredential(CN, "*.*.matching.suffix.cn");
RequiredPeerCredential cnPrefixRequirement = createRequiredCredential(CN, "matching.prefix.*.*.*");
RequiredPeerCredential sanPrefixRequirement = createRequiredCredential(SAN_DNS, "*.*.matching.suffix.san");
RequiredPeerCredential sanSuffixRequirement = createRequiredCredential(SAN_DNS, "matching.prefix.*.*.*");
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy("policy", emptySet(), cnSuffixRequirement, cnPrefixRequirement, sanPrefixRequirement, sanSuffixRequirement));
assertAuthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", "matching.prefix.matching.suffix.san")));
assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", "matching.prefix.invalid.suffix.san")));
assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("invalid.prefix.matching.suffix.cn", "matching.prefix.matching.suffix.san")));
}
private static X509Certificate createCertificate(String subjectCn, String... sanCns) {
X509CertificateBuilder builder =
X509CertificateBuilder.fromKeypair(
KEY_PAIR,
new X500Principal("CN=" + subjectCn),
Instant.EPOCH,
Instant.EPOCH.plus(100000, ChronoUnit.DAYS),
SHA256_WITH_ECDSA,
BigInteger.ONE);
for (String sanCn : sanCns) {
builder.addSubjectAlternativeName(sanCn);
}
return builder.build();
}
private static RequiredPeerCredential createRequiredCredential(Field field, String pattern) {
return new RequiredPeerCredential(field, new HostGlobPattern(pattern));
}
private static Set<Role> createRoles(String... roleNames) {
return Arrays.stream(roleNames).map(Role::new).collect(toSet());
}
private static PeerAuthorizer createPeerAuthorizer(PeerPolicy... policies) {
return new PeerAuthorizer(new AuthorizedPeers(Arrays.stream(policies).collect(toSet())));
}
private static PeerPolicy createPolicy(String name, Set<Role> roles, RequiredPeerCredential... requiredCredentials) {
return new PeerPolicy(name, roles, Arrays.asList(requiredCredentials));
}
private static void assertAuthorized(AuthorizationResult result) {
assertTrue(result.succeeded());
}
private static void assertUnauthorized(AuthorizationResult result) {
assertFalse(result.succeeded());
}
} | class PeerAuthorizerTest {
private static final KeyPair KEY_PAIR = KeyUtils.generateKeypair(KeyAlgorithm.EC);
private static final String ROLE_1 = "role-1", ROLE_2 = "role-2", ROLE_3 = "role-3", POLICY_1 = "policy-1", POLICY_2 = "policy-2";
@Test
public void certificate_must_match_both_san_and_cn_pattern() {
RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn");
RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san");
PeerAuthorizer authorizer = createPeerAuthorizer(createPolicy(POLICY_1, createRoles(ROLE_1), cnRequirement, sanRequirement));
AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", "foo.matching.san", "foo.invalid.san"));
assertAuthorized(result);
assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1);
assertThat(result.matchedPolicies()).containsOnly(POLICY_1);
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san")));
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.invalid.cn", "foo.matching.san", "foo.invalid.san")));
assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.matching.cn", "foo.invalid.san")));
}
@Test
public void can_match_multiple_policies() {
RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn");
RequiredPeerCredential sanRequirement = createRequiredCredential(SAN_DNS, "*.matching.san");
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy(POLICY_1, createRoles(ROLE_1, ROLE_2), cnRequirement, sanRequirement),
createPolicy(POLICY_2, createRoles(ROLE_2, ROLE_3), cnRequirement, sanRequirement));
AuthorizationResult result = peerAuthorizer
.authorizePeer(createCertificate("foo.matching.cn", "foo.matching.san"));
assertAuthorized(result);
assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1, ROLE_2, ROLE_3);
assertThat(result.matchedPolicies()).containsOnly(POLICY_1, POLICY_2);
}
@Test
@Test
public void must_match_all_cn_and_san_patterns() {
RequiredPeerCredential cnSuffixRequirement = createRequiredCredential(CN, "*.*.matching.suffix.cn");
RequiredPeerCredential cnPrefixRequirement = createRequiredCredential(CN, "matching.prefix.*.*.*");
RequiredPeerCredential sanPrefixRequirement = createRequiredCredential(SAN_DNS, "*.*.matching.suffix.san");
RequiredPeerCredential sanSuffixRequirement = createRequiredCredential(SAN_DNS, "matching.prefix.*.*.*");
PeerAuthorizer peerAuthorizer = createPeerAuthorizer(
createPolicy(POLICY_1, emptySet(), cnSuffixRequirement, cnPrefixRequirement, sanPrefixRequirement, sanSuffixRequirement));
assertAuthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", "matching.prefix.matching.suffix.san")));
assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("matching.prefix.matching.suffix.cn", "matching.prefix.invalid.suffix.san")));
assertUnauthorized(peerAuthorizer.authorizePeer(createCertificate("invalid.prefix.matching.suffix.cn", "matching.prefix.matching.suffix.san")));
}
private static X509Certificate createCertificate(String subjectCn, String... sanCns) {
X509CertificateBuilder builder =
X509CertificateBuilder.fromKeypair(
KEY_PAIR,
new X500Principal("CN=" + subjectCn),
Instant.EPOCH,
Instant.EPOCH.plus(100000, ChronoUnit.DAYS),
SHA256_WITH_ECDSA,
BigInteger.ONE);
for (String sanCn : sanCns) {
builder.addSubjectAlternativeName(sanCn);
}
return builder.build();
}
private static RequiredPeerCredential createRequiredCredential(Field field, String pattern) {
return new RequiredPeerCredential(field, new HostGlobPattern(pattern));
}
private static Set<Role> createRoles(String... roleNames) {
return Arrays.stream(roleNames).map(Role::new).collect(toSet());
}
private static PeerAuthorizer createPeerAuthorizer(PeerPolicy... policies) {
return new PeerAuthorizer(new AuthorizedPeers(Arrays.stream(policies).collect(toSet())));
}
private static PeerPolicy createPolicy(String name, Set<Role> roles, RequiredPeerCredential... requiredCredentials) {
return new PeerPolicy(name, roles, Arrays.asList(requiredCredentials));
}
private static void assertAuthorized(AuthorizationResult result) {
assertTrue(result.succeeded());
}
private static void assertUnauthorized(AuthorizationResult result) {
assertFalse(result.succeeded());
}
} |
The current strategy of picking the first should be "correct" given that DN parsing reverses the order of CN fields compared to the DER format. | private static Optional<String> getCommonName(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectCommonNames(peerCertificate).stream()
.findFirst();
} | .findFirst(); | private static Optional<String> getCommonName(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectCommonNames(peerCertificate).stream()
.findFirst();
} | class PeerAuthorizer {
private final AuthorizedPeers authorizedPeers;
public PeerAuthorizer(AuthorizedPeers authorizedPeers) {
this.authorizedPeers = authorizedPeers;
}
public AuthorizationResult authorizePeer(X509Certificate peerCertificate) {
Set<Role> assumedRoles = new HashSet<>();
Set<String> matchedPolicies = new HashSet<>();
String cn = getCommonName(peerCertificate).orElse(null);
List<String> sans = getSubjectAlternativeNames(peerCertificate);
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
if (matchesPolicy(peerPolicy, cn, sans)) {
assumedRoles.addAll(peerPolicy.assumedRoles());
matchedPolicies.add(peerPolicy.policyName());
}
}
return new AuthorizationResult(assumedRoles, matchedPolicies);
}
private static boolean matchesPolicy(PeerPolicy peerPolicy, String cn, List<String> sans) {
return peerPolicy.requiredCredentials().stream()
.allMatch(requiredCredential -> matchesRequiredCredentials(requiredCredential, cn, sans));
}
private static boolean matchesRequiredCredentials(RequiredPeerCredential requiredCredential, String cn, List<String> sans) {
switch (requiredCredential.field()) {
case CN:
return cn != null && requiredCredential.pattern().matches(cn);
case SAN_DNS:
return sans.stream()
.anyMatch(san -> requiredCredential.pattern().matches(san));
default:
throw new RuntimeException("Unknown field: " + requiredCredential.field());
}
}
private static List<String> getSubjectAlternativeNames(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream()
.filter(san -> san.getType() == DNS_NAME || san.getType() == IP_ADDRESS)
.map(SubjectAlternativeName::getValue)
.collect(toList());
}
} | class PeerAuthorizer {
private final AuthorizedPeers authorizedPeers;
public PeerAuthorizer(AuthorizedPeers authorizedPeers) {
this.authorizedPeers = authorizedPeers;
}
public AuthorizationResult authorizePeer(X509Certificate peerCertificate) {
Set<Role> assumedRoles = new HashSet<>();
Set<String> matchedPolicies = new HashSet<>();
String cn = getCommonName(peerCertificate).orElse(null);
List<String> sans = getSubjectAlternativeNames(peerCertificate);
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
if (matchesPolicy(peerPolicy, cn, sans)) {
assumedRoles.addAll(peerPolicy.assumedRoles());
matchedPolicies.add(peerPolicy.policyName());
}
}
return new AuthorizationResult(assumedRoles, matchedPolicies);
}
private static boolean matchesPolicy(PeerPolicy peerPolicy, String cn, List<String> sans) {
return peerPolicy.requiredCredentials().stream()
.allMatch(requiredCredential -> matchesRequiredCredentials(requiredCredential, cn, sans));
}
private static boolean matchesRequiredCredentials(RequiredPeerCredential requiredCredential, String cn, List<String> sans) {
switch (requiredCredential.field()) {
case CN:
return cn != null && requiredCredential.pattern().matches(cn);
case SAN_DNS:
return sans.stream()
.anyMatch(san -> requiredCredential.pattern().matches(san));
default:
throw new RuntimeException("Unknown field: " + requiredCredential.field());
}
}
private static List<String> getSubjectAlternativeNames(X509Certificate peerCertificate) {
return X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream()
.filter(san -> san.getType() == DNS_NAME || san.getType() == IP_ADDRESS)
.map(SubjectAlternativeName::getValue)
.collect(toList());
}
} |
Consider adding a helper function (!isNestedFieldName(...)) to better describe what we try to do here. | private static void considerField(ImportedFieldsConfig.Builder builder, ImportedField field) {
ImmutableSDField targetField = field.targetField();
String targetFieldName = targetField.getName();
if (targetFieldName.indexOf('.') == -1) {
if (field.targetField().doesAttributing()) {
builder.attribute.add(createAttributeBuilder(field));
}
} else {
Attribute attribute = targetField.getAttributes().get(targetFieldName);
if (attribute != null) {
builder.attribute.add(createAttributeBuilder(field));
}
}
} | if (targetFieldName.indexOf('.') == -1) { | private static void considerField(ImportedFieldsConfig.Builder builder, ImportedField field) {
ImmutableSDField targetField = field.targetField();
String targetFieldName = targetField.getName();
if (!isNestedFieldName(targetFieldName)) {
if (targetField.doesAttributing()) {
builder.attribute.add(createAttributeBuilder(field));
}
} else {
Attribute attribute = targetField.getAttributes().get(targetFieldName);
if (attribute != null) {
builder.attribute.add(createAttributeBuilder(field));
}
}
} | class ImportedFields extends Derived implements ImportedFieldsConfig.Producer {
private Optional<com.yahoo.searchdefinition.document.ImportedFields> importedFields = Optional.empty();
public ImportedFields(Search search) {
derive(search);
}
@Override
protected void derive(Search search) {
importedFields = search.importedFields();
}
@Override
protected String getDerivedName() {
return "imported-fields";
}
@Override
public void getConfig(ImportedFieldsConfig.Builder builder) {
if (importedFields.isPresent()) {
importedFields.get().fields().forEach( (name, field) -> considerField(builder, field));
}
}
private static ImportedFieldsConfig.Attribute.Builder createAttributeBuilder(ImportedField field) {
ImportedFieldsConfig.Attribute.Builder result = new ImportedFieldsConfig.Attribute.Builder();
result.name(field.fieldName());
result.referencefield(field.reference().referenceField().getName());
result.targetfield(field.targetField().getName());
return result;
}
} | class ImportedFields extends Derived implements ImportedFieldsConfig.Producer {
private Optional<com.yahoo.searchdefinition.document.ImportedFields> importedFields = Optional.empty();
public ImportedFields(Search search) {
derive(search);
}
@Override
protected void derive(Search search) {
importedFields = search.importedFields();
}
@Override
protected String getDerivedName() {
return "imported-fields";
}
@Override
public void getConfig(ImportedFieldsConfig.Builder builder) {
if (importedFields.isPresent()) {
importedFields.get().fields().forEach( (name, field) -> considerField(builder, field));
}
}
private static boolean isNestedFieldName(String fieldName) {
return fieldName.indexOf('.') != -1;
}
private static ImportedFieldsConfig.Attribute.Builder createAttributeBuilder(ImportedField field) {
ImportedFieldsConfig.Attribute.Builder result = new ImportedFieldsConfig.Attribute.Builder();
result.name(field.fieldName());
result.referencefield(field.reference().referenceField().getName());
result.targetfield(field.targetField().getName());
return result;
}
} |
I'm presuming the cert at index 0 is always present and represents the peer's leaf certificate? | public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType);
authorizePeer(chain[0], authType, true, null);
} | authorizePeer(chain[0], authType, true, null); | public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType);
authorizePeer(chain[0], authType, true, null);
} | class PeerAuthorizerTrustManager extends X509ExtendedTrustManager {
public static final String HANDSHAKE_SESSION_AUTHZ_RESULT_PROPERTY = "vespa.tls.authorization.result";
private static final Logger log = Logger.getLogger(PeerAuthorizerTrustManager.class.getName());
public enum Mode { DRY_RUN, ENFORCE }
private final PeerAuthorizer authorizer;
private final X509ExtendedTrustManager defaultTrustManager;
private final Mode mode;
public PeerAuthorizerTrustManager(AuthorizedPeers authorizedPeers, Mode mode, X509ExtendedTrustManager defaultTrustManager) {
this.authorizer = new PeerAuthorizer(authorizedPeers);
this.mode = mode;
this.defaultTrustManager = defaultTrustManager;
}
public static TrustManager[] wrapTrustManagersFromKeystore(AuthorizedPeers authorizedPeers, Mode mode, KeyStore keystore) throws GeneralSecurityException {
TrustManagerFactory factory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
factory.init(keystore);
return wrapTrustManagers(authorizedPeers, mode, factory.getTrustManagers());
}
public static TrustManager[] wrapTrustManagers(AuthorizedPeers authorizedPeers, Mode mode, TrustManager[] managers) {
TrustManager[] wrappedManagers = new TrustManager[managers.length];
for (int i = 0; i < managers.length; i++) {
if (managers[i] instanceof X509ExtendedTrustManager) {
wrappedManagers[i] = new PeerAuthorizerTrustManager(authorizedPeers, mode, (X509ExtendedTrustManager) managers[i]);
} else {
wrappedManagers[i] = managers[i];
}
}
return wrappedManagers;
}
@Override
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType);
authorizePeer(chain[0], authType, false, null);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType, socket);
authorizePeer(chain[0], authType, true, null);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType, socket);
authorizePeer(chain[0], authType, false, null);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType, sslEngine);
authorizePeer(chain[0], authType, true, sslEngine);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType, sslEngine);
authorizePeer(chain[0], authType, false, sslEngine);
}
@Override
public X509Certificate[] getAcceptedIssuers() {
return defaultTrustManager.getAcceptedIssuers();
}
private void authorizePeer(X509Certificate certificate, String authType, boolean isVerifyingClient, SSLEngine sslEngine) throws CertificateException {
log.fine(() -> "Verifying certificate: " + createInfoString(certificate, authType, isVerifyingClient));
AuthorizationResult result = authorizer.authorizePeer(certificate);
if (sslEngine != null) {
sslEngine.getHandshakeSession().putValue(HANDSHAKE_SESSION_AUTHZ_RESULT_PROPERTY, result);
}
if (result.succeeded()) {
log.fine(() -> String.format("Verification result: %s", result));
} else {
String errorMessage = "Authorization failed: " + createInfoString(certificate, authType, isVerifyingClient);
log.warning(errorMessage);
switch (mode) {
case ENFORCE:
throw new CertificateException(errorMessage);
case DRY_RUN:
break;
default:
throw new UnsupportedOperationException();
}
}
}
private static String createInfoString(X509Certificate certificate, String authType, boolean isVerifyingClient) {
return String.format("DN='%s', SANs=%s, authType='%s', isVerifyingClient='%b'",
certificate.getSubjectX500Principal(), X509CertificateUtils.getSubjectAlternativeNames(certificate), authType, isVerifyingClient);
}
} | class PeerAuthorizerTrustManager extends X509ExtendedTrustManager {
public static final String HANDSHAKE_SESSION_AUTHZ_RESULT_PROPERTY = "vespa.tls.authorization.result";
private static final Logger log = Logger.getLogger(PeerAuthorizerTrustManager.class.getName());
public enum Mode { DRY_RUN, ENFORCE }
private final PeerAuthorizer authorizer;
private final X509ExtendedTrustManager defaultTrustManager;
private final Mode mode;
public PeerAuthorizerTrustManager(AuthorizedPeers authorizedPeers, Mode mode, X509ExtendedTrustManager defaultTrustManager) {
this.authorizer = new PeerAuthorizer(authorizedPeers);
this.mode = mode;
this.defaultTrustManager = defaultTrustManager;
}
public static TrustManager[] wrapTrustManagersFromKeystore(AuthorizedPeers authorizedPeers, Mode mode, KeyStore keystore) throws GeneralSecurityException {
TrustManagerFactory factory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
factory.init(keystore);
return wrapTrustManagers(authorizedPeers, mode, factory.getTrustManagers());
}
public static TrustManager[] wrapTrustManagers(AuthorizedPeers authorizedPeers, Mode mode, TrustManager[] managers) {
TrustManager[] wrappedManagers = new TrustManager[managers.length];
for (int i = 0; i < managers.length; i++) {
if (managers[i] instanceof X509ExtendedTrustManager) {
wrappedManagers[i] = new PeerAuthorizerTrustManager(authorizedPeers, mode, (X509ExtendedTrustManager) managers[i]);
} else {
wrappedManagers[i] = managers[i];
}
}
return wrappedManagers;
}
@Override
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType);
authorizePeer(chain[0], authType, false, null);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType, socket);
authorizePeer(chain[0], authType, true, null);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType, socket);
authorizePeer(chain[0], authType, false, null);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType, sslEngine);
authorizePeer(chain[0], authType, true, sslEngine);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType, sslEngine);
authorizePeer(chain[0], authType, false, sslEngine);
}
@Override
public X509Certificate[] getAcceptedIssuers() {
return defaultTrustManager.getAcceptedIssuers();
}
private void authorizePeer(X509Certificate certificate, String authType, boolean isVerifyingClient, SSLEngine sslEngine) throws CertificateException {
log.fine(() -> "Verifying certificate: " + createInfoString(certificate, authType, isVerifyingClient));
AuthorizationResult result = authorizer.authorizePeer(certificate);
if (sslEngine != null) {
sslEngine.getHandshakeSession().putValue(HANDSHAKE_SESSION_AUTHZ_RESULT_PROPERTY, result);
}
if (result.succeeded()) {
log.fine(() -> String.format("Verification result: %s", result));
} else {
String errorMessage = "Authorization failed: " + createInfoString(certificate, authType, isVerifyingClient);
log.warning(errorMessage);
switch (mode) {
case ENFORCE:
throw new CertificateException(errorMessage);
case DRY_RUN:
break;
default:
throw new UnsupportedOperationException();
}
}
}
private static String createInfoString(X509Certificate certificate, String authType, boolean isVerifyingClient) {
return String.format("DN='%s', SANs=%s, authType='%s', isVerifyingClient='%b'",
certificate.getSubjectX500Principal(), X509CertificateUtils.getSubjectAlternativeNames(certificate), authType, isVerifyingClient);
}
} |
Yes. The `X509ExtendedTrustManager` interface requires all implementation to throw an `IllegalArgumentException` under that scenario: "if null or zero-length array is passed in for the chain parameter or if null or zero-length string is passed in for the authType parameter". Calling `defaultTrustManager. checkClient/ServerTrusted ` will therefore ensure that accessing `chain[0]` later on is safe. | public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType);
authorizePeer(chain[0], authType, true, null);
} | authorizePeer(chain[0], authType, true, null); | public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType);
authorizePeer(chain[0], authType, true, null);
} | class PeerAuthorizerTrustManager extends X509ExtendedTrustManager {
public static final String HANDSHAKE_SESSION_AUTHZ_RESULT_PROPERTY = "vespa.tls.authorization.result";
private static final Logger log = Logger.getLogger(PeerAuthorizerTrustManager.class.getName());
public enum Mode { DRY_RUN, ENFORCE }
private final PeerAuthorizer authorizer;
private final X509ExtendedTrustManager defaultTrustManager;
private final Mode mode;
public PeerAuthorizerTrustManager(AuthorizedPeers authorizedPeers, Mode mode, X509ExtendedTrustManager defaultTrustManager) {
this.authorizer = new PeerAuthorizer(authorizedPeers);
this.mode = mode;
this.defaultTrustManager = defaultTrustManager;
}
public static TrustManager[] wrapTrustManagersFromKeystore(AuthorizedPeers authorizedPeers, Mode mode, KeyStore keystore) throws GeneralSecurityException {
TrustManagerFactory factory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
factory.init(keystore);
return wrapTrustManagers(authorizedPeers, mode, factory.getTrustManagers());
}
public static TrustManager[] wrapTrustManagers(AuthorizedPeers authorizedPeers, Mode mode, TrustManager[] managers) {
TrustManager[] wrappedManagers = new TrustManager[managers.length];
for (int i = 0; i < managers.length; i++) {
if (managers[i] instanceof X509ExtendedTrustManager) {
wrappedManagers[i] = new PeerAuthorizerTrustManager(authorizedPeers, mode, (X509ExtendedTrustManager) managers[i]);
} else {
wrappedManagers[i] = managers[i];
}
}
return wrappedManagers;
}
@Override
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType);
authorizePeer(chain[0], authType, false, null);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType, socket);
authorizePeer(chain[0], authType, true, null);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType, socket);
authorizePeer(chain[0], authType, false, null);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType, sslEngine);
authorizePeer(chain[0], authType, true, sslEngine);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType, sslEngine);
authorizePeer(chain[0], authType, false, sslEngine);
}
@Override
public X509Certificate[] getAcceptedIssuers() {
return defaultTrustManager.getAcceptedIssuers();
}
private void authorizePeer(X509Certificate certificate, String authType, boolean isVerifyingClient, SSLEngine sslEngine) throws CertificateException {
log.fine(() -> "Verifying certificate: " + createInfoString(certificate, authType, isVerifyingClient));
AuthorizationResult result = authorizer.authorizePeer(certificate);
if (sslEngine != null) {
sslEngine.getHandshakeSession().putValue(HANDSHAKE_SESSION_AUTHZ_RESULT_PROPERTY, result);
}
if (result.succeeded()) {
log.fine(() -> String.format("Verification result: %s", result));
} else {
String errorMessage = "Authorization failed: " + createInfoString(certificate, authType, isVerifyingClient);
log.warning(errorMessage);
switch (mode) {
case ENFORCE:
throw new CertificateException(errorMessage);
case DRY_RUN:
break;
default:
throw new UnsupportedOperationException();
}
}
}
private static String createInfoString(X509Certificate certificate, String authType, boolean isVerifyingClient) {
return String.format("DN='%s', SANs=%s, authType='%s', isVerifyingClient='%b'",
certificate.getSubjectX500Principal(), X509CertificateUtils.getSubjectAlternativeNames(certificate), authType, isVerifyingClient);
}
} | class PeerAuthorizerTrustManager extends X509ExtendedTrustManager {
public static final String HANDSHAKE_SESSION_AUTHZ_RESULT_PROPERTY = "vespa.tls.authorization.result";
private static final Logger log = Logger.getLogger(PeerAuthorizerTrustManager.class.getName());
public enum Mode { DRY_RUN, ENFORCE }
private final PeerAuthorizer authorizer;
private final X509ExtendedTrustManager defaultTrustManager;
private final Mode mode;
public PeerAuthorizerTrustManager(AuthorizedPeers authorizedPeers, Mode mode, X509ExtendedTrustManager defaultTrustManager) {
this.authorizer = new PeerAuthorizer(authorizedPeers);
this.mode = mode;
this.defaultTrustManager = defaultTrustManager;
}
public static TrustManager[] wrapTrustManagersFromKeystore(AuthorizedPeers authorizedPeers, Mode mode, KeyStore keystore) throws GeneralSecurityException {
TrustManagerFactory factory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
factory.init(keystore);
return wrapTrustManagers(authorizedPeers, mode, factory.getTrustManagers());
}
public static TrustManager[] wrapTrustManagers(AuthorizedPeers authorizedPeers, Mode mode, TrustManager[] managers) {
TrustManager[] wrappedManagers = new TrustManager[managers.length];
for (int i = 0; i < managers.length; i++) {
if (managers[i] instanceof X509ExtendedTrustManager) {
wrappedManagers[i] = new PeerAuthorizerTrustManager(authorizedPeers, mode, (X509ExtendedTrustManager) managers[i]);
} else {
wrappedManagers[i] = managers[i];
}
}
return wrappedManagers;
}
@Override
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType);
authorizePeer(chain[0], authType, false, null);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType, socket);
authorizePeer(chain[0], authType, true, null);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType, socket);
authorizePeer(chain[0], authType, false, null);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException {
defaultTrustManager.checkClientTrusted(chain, authType, sslEngine);
authorizePeer(chain[0], authType, true, sslEngine);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException {
defaultTrustManager.checkServerTrusted(chain, authType, sslEngine);
authorizePeer(chain[0], authType, false, sslEngine);
}
@Override
public X509Certificate[] getAcceptedIssuers() {
return defaultTrustManager.getAcceptedIssuers();
}
private void authorizePeer(X509Certificate certificate, String authType, boolean isVerifyingClient, SSLEngine sslEngine) throws CertificateException {
log.fine(() -> "Verifying certificate: " + createInfoString(certificate, authType, isVerifyingClient));
AuthorizationResult result = authorizer.authorizePeer(certificate);
if (sslEngine != null) {
sslEngine.getHandshakeSession().putValue(HANDSHAKE_SESSION_AUTHZ_RESULT_PROPERTY, result);
}
if (result.succeeded()) {
log.fine(() -> String.format("Verification result: %s", result));
} else {
String errorMessage = "Authorization failed: " + createInfoString(certificate, authType, isVerifyingClient);
log.warning(errorMessage);
switch (mode) {
case ENFORCE:
throw new CertificateException(errorMessage);
case DRY_RUN:
break;
default:
throw new UnsupportedOperationException();
}
}
}
private static String createInfoString(X509Certificate certificate, String authType, boolean isVerifyingClient) {
return String.format("DN='%s', SANs=%s, authType='%s', isVerifyingClient='%b'",
certificate.getSubjectX500Principal(), X509CertificateUtils.getSubjectAlternativeNames(certificate), authType, isVerifyingClient);
}
} |
this should fail right? | public void test_find_allocation_invalid_ipv4_reverse_record() {
IP.AddressPool pool = dualStackPool();
resolver.removeRecord("127.0.0.2")
.addReverseRecord("127.0.0.2", "host5");
try {
pool.findAllocation(emptyList, resolver);
} catch (IllegalArgumentException e) {
assertEquals("Hostnames resolved from each IP address do not point to the same hostname " +
"[::1 -> host3, 127.0.0.2 -> host5]", e.getMessage());
}
} | public void test_find_allocation_invalid_ipv4_reverse_record() {
IP.AddressPool pool = dualStackPool();
resolver.removeRecord("127.0.0.2")
.addReverseRecord("127.0.0.2", "host5");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Hostnames resolved from each IP address do not point to the same hostname " +
"[::1 -> host3, 127.0.0.2 -> host5]", e.getMessage());
}
} | class IPTest {
private static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default");
private static final NodeList emptyList = new NodeList(Collections.emptyList());
private MockNameResolver resolver;
@Before
public void before() {
resolver = new MockNameResolver().explicitReverseRecords();
}
@Test
public void test_natural_order() {
Set<String> ipAddresses = ImmutableSet.of(
"192.168.254.1",
"192.168.254.254",
"127.7.3.1",
"127.5.254.1",
"172.16.100.1",
"172.16.254.2",
"2001:db8:0:0:0:0:0:ffff",
"2001:db8:95a3:0:0:0:0:7334",
"2001:db8:85a3:0:0:8a2e:370:7334",
"::1",
"::10",
"::20");
assertEquals(
Arrays.asList(
"127.5.254.1",
"127.7.3.1",
"172.16.100.1",
"172.16.254.2",
"192.168.254.1",
"192.168.254.254",
"::1",
"::10",
"::20",
"2001:db8:0:0:0:0:0:ffff",
"2001:db8:85a3:0:0:8a2e:370:7334",
"2001:db8:95a3:0:0:0:0:7334"),
new ArrayList<>(ImmutableSortedSet.copyOf(IP.naturalOrder, ipAddresses))
);
}
@Test
public void test_find_allocation_single_stack() {
IP.AddressPool pool = createNode(ImmutableSet.of(
"::1",
"::2",
"::3"
)).ipAddressPool();
resolver.addRecord("host1", "::2");
resolver.addRecord("host2", "::3");
resolver.addRecord("host3", "::1");
resolver.addReverseRecord("::3", "host2");
resolver.addReverseRecord("::1", "host3");
resolver.addReverseRecord("::2", "host1");
Optional<IP.Allocation> allocation = pool.findAllocation(emptyList, resolver);
assertEquals("::1", allocation.get().ipv6Address());
Assert.assertFalse(allocation.get().ipv4Address().isPresent());
assertEquals("host3", allocation.get().hostname());
resolver.removeRecord("host3");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (Exception e) {
assertEquals("java.net.UnknownHostException: Could not resolve: host3", e.getMessage());
}
}
@Test
public void test_find_allocation_dual_stack() {
IP.AddressPool pool = dualStackPool();
Optional<IP.Allocation> allocation = pool.findAllocation(emptyList, resolver);
assertEquals("::1", allocation.get().ipv6Address());
assertEquals("127.0.0.2", allocation.get().ipv4Address().get());
assertEquals("host3", allocation.get().hostname());
}
@Test
public void test_find_allocation_multiple_ipv4_addresses() {
IP.AddressPool pool = dualStackPool();
resolver.addRecord("host3", "127.0.0.127");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Hostname host3 resolved to more than 1 IPv4 address: [127.0.0.2, 127.0.0.127]",
e.getMessage());
}
}
@Test
private IP.AddressPool dualStackPool() {
Node node = createNode(ImmutableSet.of(
"127.0.0.1",
"127.0.0.2",
"127.0.0.3",
"::1",
"::2",
"::3"
));
resolver.addRecord("host1", "127.0.0.3")
.addRecord("host2", "127.0.0.1")
.addRecord("host3", "127.0.0.2")
.addReverseRecord("127.0.0.1", "host2")
.addReverseRecord("127.0.0.2", "host3")
.addReverseRecord("127.0.0.3", "host1");
resolver.addRecord("host1", "::2")
.addRecord("host2", "::3")
.addRecord("host3", "::1")
.addReverseRecord("::3", "host2")
.addReverseRecord("::1", "host3")
.addReverseRecord("::2", "host1");
return node.ipAddressPool();
}
private static Node createNode(Set<String> ipAddresses) {
return Node.create("id1", Collections.singleton("127.0.0.1"), ipAddresses,
"host1", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"),
NodeType.host);
}
} | class IPTest {
private static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default");
private static final NodeList emptyList = new NodeList(Collections.emptyList());
private MockNameResolver resolver;
@Before
public void before() {
resolver = new MockNameResolver().explicitReverseRecords();
}
@Test
public void test_natural_order() {
Set<String> ipAddresses = ImmutableSet.of(
"192.168.254.1",
"192.168.254.254",
"127.7.3.1",
"127.5.254.1",
"172.16.100.1",
"172.16.254.2",
"2001:db8:0:0:0:0:0:ffff",
"2001:db8:95a3:0:0:0:0:7334",
"2001:db8:85a3:0:0:8a2e:370:7334",
"::1",
"::10",
"::20");
assertEquals(
Arrays.asList(
"127.5.254.1",
"127.7.3.1",
"172.16.100.1",
"172.16.254.2",
"192.168.254.1",
"192.168.254.254",
"::1",
"::10",
"::20",
"2001:db8:0:0:0:0:0:ffff",
"2001:db8:85a3:0:0:8a2e:370:7334",
"2001:db8:95a3:0:0:0:0:7334"),
new ArrayList<>(ImmutableSortedSet.copyOf(IP.naturalOrder, ipAddresses))
);
}
@Test
public void test_find_allocation_single_stack() {
IP.AddressPool pool = createNode(ImmutableSet.of(
"::1",
"::2",
"::3"
)).ipAddressPool();
resolver.addRecord("host1", "::2");
resolver.addRecord("host2", "::3");
resolver.addRecord("host3", "::1");
resolver.addReverseRecord("::3", "host2");
resolver.addReverseRecord("::1", "host3");
resolver.addReverseRecord("::2", "host1");
Optional<IP.Allocation> allocation = pool.findAllocation(emptyList, resolver);
assertEquals("::1", allocation.get().ipv6Address());
Assert.assertFalse(allocation.get().ipv4Address().isPresent());
assertEquals("host3", allocation.get().hostname());
resolver.removeRecord("host3");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (Exception e) {
assertEquals("java.net.UnknownHostException: Could not resolve: host3", e.getMessage());
}
}
@Test
public void test_find_allocation_dual_stack() {
IP.AddressPool pool = dualStackPool();
Optional<IP.Allocation> allocation = pool.findAllocation(emptyList, resolver);
assertEquals("::1", allocation.get().ipv6Address());
assertEquals("127.0.0.2", allocation.get().ipv4Address().get());
assertEquals("host3", allocation.get().hostname());
}
@Test
public void test_find_allocation_multiple_ipv4_addresses() {
IP.AddressPool pool = dualStackPool();
resolver.addRecord("host3", "127.0.0.127");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Hostname host3 resolved to more than 1 IPv4 address: [127.0.0.2, 127.0.0.127]",
e.getMessage());
}
}
@Test
private IP.AddressPool dualStackPool() {
Node node = createNode(ImmutableSet.of(
"127.0.0.1",
"127.0.0.2",
"127.0.0.3",
"::1",
"::2",
"::3"
));
resolver.addRecord("host1", "127.0.0.3")
.addRecord("host2", "127.0.0.1")
.addRecord("host3", "127.0.0.2")
.addReverseRecord("127.0.0.1", "host2")
.addReverseRecord("127.0.0.2", "host3")
.addReverseRecord("127.0.0.3", "host1");
resolver.addRecord("host1", "::2")
.addRecord("host2", "::3")
.addRecord("host3", "::1")
.addReverseRecord("::3", "host2")
.addReverseRecord("::1", "host3")
.addReverseRecord("::2", "host1");
return node.ipAddressPool();
}
private static Node createNode(Set<String> ipAddresses) {
return Node.create("id1", Collections.singleton("127.0.0.1"), ipAddresses,
"host1", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"),
NodeType.host);
}
} | |
Yes, thanks! | public void test_find_allocation_invalid_ipv4_reverse_record() {
IP.AddressPool pool = dualStackPool();
resolver.removeRecord("127.0.0.2")
.addReverseRecord("127.0.0.2", "host5");
try {
pool.findAllocation(emptyList, resolver);
} catch (IllegalArgumentException e) {
assertEquals("Hostnames resolved from each IP address do not point to the same hostname " +
"[::1 -> host3, 127.0.0.2 -> host5]", e.getMessage());
}
} | public void test_find_allocation_invalid_ipv4_reverse_record() {
IP.AddressPool pool = dualStackPool();
resolver.removeRecord("127.0.0.2")
.addReverseRecord("127.0.0.2", "host5");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Hostnames resolved from each IP address do not point to the same hostname " +
"[::1 -> host3, 127.0.0.2 -> host5]", e.getMessage());
}
} | class IPTest {
private static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default");
private static final NodeList emptyList = new NodeList(Collections.emptyList());
private MockNameResolver resolver;
@Before
public void before() {
resolver = new MockNameResolver().explicitReverseRecords();
}
@Test
public void test_natural_order() {
Set<String> ipAddresses = ImmutableSet.of(
"192.168.254.1",
"192.168.254.254",
"127.7.3.1",
"127.5.254.1",
"172.16.100.1",
"172.16.254.2",
"2001:db8:0:0:0:0:0:ffff",
"2001:db8:95a3:0:0:0:0:7334",
"2001:db8:85a3:0:0:8a2e:370:7334",
"::1",
"::10",
"::20");
assertEquals(
Arrays.asList(
"127.5.254.1",
"127.7.3.1",
"172.16.100.1",
"172.16.254.2",
"192.168.254.1",
"192.168.254.254",
"::1",
"::10",
"::20",
"2001:db8:0:0:0:0:0:ffff",
"2001:db8:85a3:0:0:8a2e:370:7334",
"2001:db8:95a3:0:0:0:0:7334"),
new ArrayList<>(ImmutableSortedSet.copyOf(IP.naturalOrder, ipAddresses))
);
}
@Test
public void test_find_allocation_single_stack() {
IP.AddressPool pool = createNode(ImmutableSet.of(
"::1",
"::2",
"::3"
)).ipAddressPool();
resolver.addRecord("host1", "::2");
resolver.addRecord("host2", "::3");
resolver.addRecord("host3", "::1");
resolver.addReverseRecord("::3", "host2");
resolver.addReverseRecord("::1", "host3");
resolver.addReverseRecord("::2", "host1");
Optional<IP.Allocation> allocation = pool.findAllocation(emptyList, resolver);
assertEquals("::1", allocation.get().ipv6Address());
Assert.assertFalse(allocation.get().ipv4Address().isPresent());
assertEquals("host3", allocation.get().hostname());
resolver.removeRecord("host3");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (Exception e) {
assertEquals("java.net.UnknownHostException: Could not resolve: host3", e.getMessage());
}
}
@Test
public void test_find_allocation_dual_stack() {
IP.AddressPool pool = dualStackPool();
Optional<IP.Allocation> allocation = pool.findAllocation(emptyList, resolver);
assertEquals("::1", allocation.get().ipv6Address());
assertEquals("127.0.0.2", allocation.get().ipv4Address().get());
assertEquals("host3", allocation.get().hostname());
}
@Test
public void test_find_allocation_multiple_ipv4_addresses() {
IP.AddressPool pool = dualStackPool();
resolver.addRecord("host3", "127.0.0.127");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Hostname host3 resolved to more than 1 IPv4 address: [127.0.0.2, 127.0.0.127]",
e.getMessage());
}
}
@Test
private IP.AddressPool dualStackPool() {
Node node = createNode(ImmutableSet.of(
"127.0.0.1",
"127.0.0.2",
"127.0.0.3",
"::1",
"::2",
"::3"
));
resolver.addRecord("host1", "127.0.0.3")
.addRecord("host2", "127.0.0.1")
.addRecord("host3", "127.0.0.2")
.addReverseRecord("127.0.0.1", "host2")
.addReverseRecord("127.0.0.2", "host3")
.addReverseRecord("127.0.0.3", "host1");
resolver.addRecord("host1", "::2")
.addRecord("host2", "::3")
.addRecord("host3", "::1")
.addReverseRecord("::3", "host2")
.addReverseRecord("::1", "host3")
.addReverseRecord("::2", "host1");
return node.ipAddressPool();
}
private static Node createNode(Set<String> ipAddresses) {
return Node.create("id1", Collections.singleton("127.0.0.1"), ipAddresses,
"host1", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"),
NodeType.host);
}
} | class IPTest {
private static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default");
private static final NodeList emptyList = new NodeList(Collections.emptyList());
private MockNameResolver resolver;
@Before
public void before() {
resolver = new MockNameResolver().explicitReverseRecords();
}
@Test
public void test_natural_order() {
Set<String> ipAddresses = ImmutableSet.of(
"192.168.254.1",
"192.168.254.254",
"127.7.3.1",
"127.5.254.1",
"172.16.100.1",
"172.16.254.2",
"2001:db8:0:0:0:0:0:ffff",
"2001:db8:95a3:0:0:0:0:7334",
"2001:db8:85a3:0:0:8a2e:370:7334",
"::1",
"::10",
"::20");
assertEquals(
Arrays.asList(
"127.5.254.1",
"127.7.3.1",
"172.16.100.1",
"172.16.254.2",
"192.168.254.1",
"192.168.254.254",
"::1",
"::10",
"::20",
"2001:db8:0:0:0:0:0:ffff",
"2001:db8:85a3:0:0:8a2e:370:7334",
"2001:db8:95a3:0:0:0:0:7334"),
new ArrayList<>(ImmutableSortedSet.copyOf(IP.naturalOrder, ipAddresses))
);
}
@Test
public void test_find_allocation_single_stack() {
IP.AddressPool pool = createNode(ImmutableSet.of(
"::1",
"::2",
"::3"
)).ipAddressPool();
resolver.addRecord("host1", "::2");
resolver.addRecord("host2", "::3");
resolver.addRecord("host3", "::1");
resolver.addReverseRecord("::3", "host2");
resolver.addReverseRecord("::1", "host3");
resolver.addReverseRecord("::2", "host1");
Optional<IP.Allocation> allocation = pool.findAllocation(emptyList, resolver);
assertEquals("::1", allocation.get().ipv6Address());
Assert.assertFalse(allocation.get().ipv4Address().isPresent());
assertEquals("host3", allocation.get().hostname());
resolver.removeRecord("host3");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (Exception e) {
assertEquals("java.net.UnknownHostException: Could not resolve: host3", e.getMessage());
}
}
@Test
public void test_find_allocation_dual_stack() {
IP.AddressPool pool = dualStackPool();
Optional<IP.Allocation> allocation = pool.findAllocation(emptyList, resolver);
assertEquals("::1", allocation.get().ipv6Address());
assertEquals("127.0.0.2", allocation.get().ipv4Address().get());
assertEquals("host3", allocation.get().hostname());
}
@Test
public void test_find_allocation_multiple_ipv4_addresses() {
IP.AddressPool pool = dualStackPool();
resolver.addRecord("host3", "127.0.0.127");
try {
pool.findAllocation(emptyList, resolver);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Hostname host3 resolved to more than 1 IPv4 address: [127.0.0.2, 127.0.0.127]",
e.getMessage());
}
}
@Test
private IP.AddressPool dualStackPool() {
Node node = createNode(ImmutableSet.of(
"127.0.0.1",
"127.0.0.2",
"127.0.0.3",
"::1",
"::2",
"::3"
));
resolver.addRecord("host1", "127.0.0.3")
.addRecord("host2", "127.0.0.1")
.addRecord("host3", "127.0.0.2")
.addReverseRecord("127.0.0.1", "host2")
.addReverseRecord("127.0.0.2", "host3")
.addReverseRecord("127.0.0.3", "host1");
resolver.addRecord("host1", "::2")
.addRecord("host2", "::3")
.addRecord("host3", "::1")
.addReverseRecord("::3", "host2")
.addReverseRecord("::1", "host3")
.addReverseRecord("::2", "host1");
return node.ipAddressPool();
}
private static Node createNode(Set<String> ipAddresses) {
return Node.create("id1", Collections.singleton("127.0.0.1"), ipAddresses,
"host1", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"),
NodeType.host);
}
} | |
It looks like it's not possible to unset a target version once it has been set, so in that regard this is fine... However, if that were to change, this should also actually deallocate nodes allocated to this application? As this stands right now, those nodes would still be in `active`, but `DuperModel` wouldn't know anything about them? | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | duperModel.infraApplicationRemoved(application.getApplicationId()); | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} |
Yes, I think the InfrastructureProvisioner should then deallocate those nodes, and it sounds right that the DuperModel would not know about that application then. | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | duperModel.infraApplicationRemoved(application.getApplicationId()); | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} |
Done. | public void handleRequest(BaseRequest request) throws Exception {
BaseResponse response = new BaseResponse();
LOG.debug("receive http request. url=", request.getRequest().uri());
execute(request, response);
} | LOG.debug("receive http request. url=", request.getRequest().uri()); | public void handleRequest(BaseRequest request) throws Exception {
BaseResponse response = new BaseResponse();
LOG.debug("receive http request. url={}", request.getRequest().uri());
execute(request, response);
} | class BaseAction implements IAction {
private static final Logger LOG = LogManager.getLogger(BaseAction.class);
protected QeService qeService = null;
protected ActionController controller;
protected Catalog catalog;
public BaseAction(ActionController controller) {
this.controller = controller;
this.catalog = Catalog.getInstance();
}
public QeService getQeService() {
return qeService;
}
public void setQeService(QeService qeService) {
this.qeService = qeService;
}
@Override
public abstract void execute(BaseRequest request, BaseResponse response) throws DdlException;
protected void writeResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status) {
FullHttpResponse responseObj = null;
try {
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes("UTF-8")));
} catch (UnsupportedEncodingException e) {
LOG.warn("get exception.", e);
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes()));
}
Preconditions.checkNotNull(responseObj);
HttpMethod method = request.getRequest().method();
checkDefaultContentTypeHeader(response, responseObj);
if (!method.equals(HttpMethod.HEAD)) {
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH,
String.valueOf(responseObj.content().readableBytes()));
}
writeCustomHeaders(response, responseObj);
writeCookies(response, responseObj);
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
request.getContext().write(responseObj).addListener(ChannelFutureListener.CLOSE);
} else {
responseObj.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
request.getContext().write(responseObj);
}
}
protected void writeFileResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status,
File resFile) {
HttpResponse responseObj = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
if (HttpHeaders.isKeepAlive(request.getRequest())) {
response.updateHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
}
ChannelFuture sendFileFuture;
ChannelFuture lastContentFuture;
RandomAccessFile rafFile;
try {
rafFile = new RandomAccessFile(resFile, "r");
long fileLength = 0;
fileLength = rafFile.length();
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(fileLength));
writeCookies(response, responseObj);
writeCustomHeaders(response, responseObj);
request.getContext().write(responseObj);
if (request.getContext().pipeline().get(SslHandler.class) == null) {
sendFileFuture = request.getContext().write(new DefaultFileRegion(rafFile.getChannel(), 0, fileLength),
request.getContext().newProgressivePromise());
lastContentFuture = request.getContext().writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
} else {
sendFileFuture = request.getContext().writeAndFlush(
new HttpChunkedInput(new ChunkedFile(rafFile, 0, fileLength, 8192)),
request.getContext().newProgressivePromise());
lastContentFuture = sendFileFuture;
}
} catch (FileNotFoundException ignore) {
writeResponse(request, response, HttpResponseStatus.NOT_FOUND);
return;
} catch (IOException e1) {
writeResponse(request, response, HttpResponseStatus.INTERNAL_SERVER_ERROR);
return;
}
sendFileFuture.addListener(new ChannelProgressiveFutureListener() {
@Override
public void operationProgressed(ChannelProgressiveFuture future, long progress, long total) {
if (total < 0) {
LOG.debug("{} Transfer progress: {}", future.channel(), progress);
} else {
LOG.debug("{} Transfer progress: {} / {}", future.channel(), progress, total);
}
}
@Override
public void operationComplete(ChannelProgressiveFuture future) {
LOG.debug("{} Transfer complete.", future.channel());
if (!future.isSuccess()) {
Throwable cause = future.cause();
LOG.error("something wrong. ", cause);
}
}
});
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
lastContentFuture.addListener(ChannelFutureListener.CLOSE);
}
}
protected void checkDefaultContentTypeHeader(BaseResponse response, Object responseOj) {
List<String> header = response.getCustomHeaders().get(HttpHeaders.Names.CONTENT_TYPE);
if (header == null) {
response.updateHeader(HttpHeaders.Names.CONTENT_TYPE, "text/html");
}
}
protected void writeCustomHeaders(BaseResponse response, HttpResponse responseObj) {
for (Map.Entry<String, List<String>> entry : response.getHeaders().entrySet()) {
responseObj.headers().add(entry.getKey(), entry.getValue());
}
}
protected void writeCookies(BaseResponse response, HttpResponse responseObj) {
for (Cookie cookie : response.getCookies()) {
responseObj.headers().add(HttpHeaders.Names.SET_COOKIE, ServerCookieEncoder.encode(cookie));
}
}
public static class AuthorizationInfo {
public String fullUserName;
public String password;
public String cluster;
}
public boolean parseAuth(BaseRequest request, AuthorizationInfo authInfo) {
String encodedAuthString = request.getAuthorizationHeader();
if (Strings.isNullOrEmpty(encodedAuthString)) {
return false;
}
String[] parts = encodedAuthString.split(" ");
if (parts.length != 2) {
return false;
}
encodedAuthString = parts[1];
ByteBuf buf = null;
try {
buf = Unpooled.copiedBuffer(ByteBuffer.wrap(encodedAuthString.getBytes()));
String authString = Base64.decode(buf).toString(CharsetUtil.UTF_8);
int index = authString.indexOf(":");
authInfo.fullUserName = authString.substring(0, index);
final String[] elements = authInfo.fullUserName.split("@");
if (elements != null && elements.length < 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER,
authInfo.fullUserName);
authInfo.cluster = SystemInfoService.DEFAULT_CLUSTER;
} else if (elements != null && elements.length == 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(elements[1], elements[0]);
authInfo.cluster = elements[1];
}
authInfo.password = authString.substring(index + 1);
} finally {
if (buf != null) {
buf.release();
}
}
return true;
}
private AuthorizationInfo checkAndGetUser(BaseRequest request)
throws UnauthorizedException {
AuthorizationInfo authInfo = new AuthorizationInfo();
if (!parseAuth(request, authInfo)) {
throw new UnauthorizedException("Need auth information.");
}
byte[] hashedPasswd = catalog.getUserMgr().getPassword(authInfo.fullUserName);
if (hashedPasswd == null) {
throw new UnauthorizedException("No such user(" + authInfo.fullUserName + ")");
}
if (!MysqlPassword.checkPlainPass(hashedPasswd, authInfo.password)) {
throw new UnauthorizedException("Password error");
}
return authInfo;
}
protected void checkAdmin(BaseRequest request) throws UnauthorizedException {
final AuthorizationInfo authInfo = checkAndGetUser(request);
if (!catalog.getUserMgr().isAdmin(authInfo.fullUserName)) {
throw new UnauthorizedException("Administrator needed");
}
}
protected void checkReadPriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_ONLY)) {
throw new UnauthorizedException("Read Privilege needed");
}
}
protected void checkWritePriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_WRITE)) {
throw new UnauthorizedException("Write Privilege needed");
}
}
public AuthorizationInfo getAuthorizationInfo(BaseRequest request)
throws UnauthorizedException {
return checkAndGetUser(request);
}
protected void writeAuthResponse(BaseRequest request, BaseResponse response) {
response.addHeader(HttpHeaders.Names.WWW_AUTHENTICATE, "Basic realm=\"\"");
writeResponse(request, response, HttpResponseStatus.UNAUTHORIZED);
}
} | class BaseAction implements IAction {
private static final Logger LOG = LogManager.getLogger(BaseAction.class);
protected QeService qeService = null;
protected ActionController controller;
protected Catalog catalog;
public BaseAction(ActionController controller) {
this.controller = controller;
this.catalog = Catalog.getInstance();
}
public QeService getQeService() {
return qeService;
}
public void setQeService(QeService qeService) {
this.qeService = qeService;
}
@Override
public abstract void execute(BaseRequest request, BaseResponse response) throws DdlException;
protected void writeResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status) {
FullHttpResponse responseObj = null;
try {
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes("UTF-8")));
} catch (UnsupportedEncodingException e) {
LOG.warn("get exception.", e);
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes()));
}
Preconditions.checkNotNull(responseObj);
HttpMethod method = request.getRequest().method();
checkDefaultContentTypeHeader(response, responseObj);
if (!method.equals(HttpMethod.HEAD)) {
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH,
String.valueOf(responseObj.content().readableBytes()));
}
writeCustomHeaders(response, responseObj);
writeCookies(response, responseObj);
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
request.getContext().write(responseObj).addListener(ChannelFutureListener.CLOSE);
} else {
responseObj.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
request.getContext().write(responseObj);
}
}
protected void writeFileResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status,
File resFile) {
HttpResponse responseObj = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
if (HttpHeaders.isKeepAlive(request.getRequest())) {
response.updateHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
}
ChannelFuture sendFileFuture;
ChannelFuture lastContentFuture;
RandomAccessFile rafFile;
try {
rafFile = new RandomAccessFile(resFile, "r");
long fileLength = 0;
fileLength = rafFile.length();
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(fileLength));
writeCookies(response, responseObj);
writeCustomHeaders(response, responseObj);
request.getContext().write(responseObj);
if (request.getContext().pipeline().get(SslHandler.class) == null) {
sendFileFuture = request.getContext().write(new DefaultFileRegion(rafFile.getChannel(), 0, fileLength),
request.getContext().newProgressivePromise());
lastContentFuture = request.getContext().writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
} else {
sendFileFuture = request.getContext().writeAndFlush(
new HttpChunkedInput(new ChunkedFile(rafFile, 0, fileLength, 8192)),
request.getContext().newProgressivePromise());
lastContentFuture = sendFileFuture;
}
} catch (FileNotFoundException ignore) {
writeResponse(request, response, HttpResponseStatus.NOT_FOUND);
return;
} catch (IOException e1) {
writeResponse(request, response, HttpResponseStatus.INTERNAL_SERVER_ERROR);
return;
}
sendFileFuture.addListener(new ChannelProgressiveFutureListener() {
@Override
public void operationProgressed(ChannelProgressiveFuture future, long progress, long total) {
if (total < 0) {
LOG.debug("{} Transfer progress: {}", future.channel(), progress);
} else {
LOG.debug("{} Transfer progress: {} / {}", future.channel(), progress, total);
}
}
@Override
public void operationComplete(ChannelProgressiveFuture future) {
LOG.debug("{} Transfer complete.", future.channel());
if (!future.isSuccess()) {
Throwable cause = future.cause();
LOG.error("something wrong. ", cause);
}
}
});
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
lastContentFuture.addListener(ChannelFutureListener.CLOSE);
}
}
protected void checkDefaultContentTypeHeader(BaseResponse response, Object responseOj) {
List<String> header = response.getCustomHeaders().get(HttpHeaders.Names.CONTENT_TYPE);
if (header == null) {
response.updateHeader(HttpHeaders.Names.CONTENT_TYPE, "text/html");
}
}
protected void writeCustomHeaders(BaseResponse response, HttpResponse responseObj) {
for (Map.Entry<String, List<String>> entry : response.getHeaders().entrySet()) {
responseObj.headers().add(entry.getKey(), entry.getValue());
}
}
protected void writeCookies(BaseResponse response, HttpResponse responseObj) {
for (Cookie cookie : response.getCookies()) {
responseObj.headers().add(HttpHeaders.Names.SET_COOKIE, ServerCookieEncoder.encode(cookie));
}
}
public static class AuthorizationInfo {
public String fullUserName;
public String password;
public String cluster;
}
public boolean parseAuth(BaseRequest request, AuthorizationInfo authInfo) {
String encodedAuthString = request.getAuthorizationHeader();
if (Strings.isNullOrEmpty(encodedAuthString)) {
return false;
}
String[] parts = encodedAuthString.split(" ");
if (parts.length != 2) {
return false;
}
encodedAuthString = parts[1];
ByteBuf buf = null;
try {
buf = Unpooled.copiedBuffer(ByteBuffer.wrap(encodedAuthString.getBytes()));
String authString = Base64.decode(buf).toString(CharsetUtil.UTF_8);
int index = authString.indexOf(":");
authInfo.fullUserName = authString.substring(0, index);
final String[] elements = authInfo.fullUserName.split("@");
if (elements != null && elements.length < 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER,
authInfo.fullUserName);
authInfo.cluster = SystemInfoService.DEFAULT_CLUSTER;
} else if (elements != null && elements.length == 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(elements[1], elements[0]);
authInfo.cluster = elements[1];
}
authInfo.password = authString.substring(index + 1);
} finally {
if (buf != null) {
buf.release();
}
}
return true;
}
private AuthorizationInfo checkAndGetUser(BaseRequest request)
throws UnauthorizedException {
AuthorizationInfo authInfo = new AuthorizationInfo();
if (!parseAuth(request, authInfo)) {
throw new UnauthorizedException("Need auth information.");
}
byte[] hashedPasswd = catalog.getUserMgr().getPassword(authInfo.fullUserName);
if (hashedPasswd == null) {
throw new UnauthorizedException("No such user(" + authInfo.fullUserName + ")");
}
if (!MysqlPassword.checkPlainPass(hashedPasswd, authInfo.password)) {
throw new UnauthorizedException("Password error");
}
return authInfo;
}
protected void checkAdmin(BaseRequest request) throws UnauthorizedException {
final AuthorizationInfo authInfo = checkAndGetUser(request);
if (!catalog.getUserMgr().isAdmin(authInfo.fullUserName)) {
throw new UnauthorizedException("Administrator needed");
}
}
protected void checkReadPriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_ONLY)) {
throw new UnauthorizedException("Read Privilege needed");
}
}
protected void checkWritePriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_WRITE)) {
throw new UnauthorizedException("Write Privilege needed");
}
}
public AuthorizationInfo getAuthorizationInfo(BaseRequest request)
throws UnauthorizedException {
return checkAndGetUser(request);
}
protected void writeAuthResponse(BaseRequest request, BaseResponse response) {
response.addHeader(HttpHeaders.Names.WWW_AUTHENTICATE, "Basic realm=\"\"");
writeResponse(request, response, HttpResponseStatus.UNAUTHORIZED);
}
} |
Right, it should, but it doesn't. Remove the removal and add a TODO instead? | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | duperModel.infraApplicationRemoved(application.getApplicationId()); | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} |
Why should the infraApplicationRemoved call be removed? Is it the correct long-term thing? Not doing anything and leaving a stale application around until a config server restart doesn't seem better? | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | duperModel.infraApplicationRemoved(application.getApplicationId()); | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} |
Btw, to remove an application today, one would have to delete the nodes and unset the target version. That's feasible with this code without restarting the config server, provided one somehow manages to remove the target version. If the infraApplicationRemoved call is removed, it is not feasible. | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | duperModel.infraApplicationRemoved(application.getApplicationId()); | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} |
I have added a todo on removing active nodes if the target version is empty. | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | duperModel.infraApplicationRemoved(application.getApplicationId()); | protected void maintain() {
for (InfraApplicationApi application: duperModel.getSupportedInfraApplications()) {
try (Mutex lock = nodeRepository().lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (!targetVersion.isPresent()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No target version set");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
List<Version> wantedVersions = nodeRepository()
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive)
.stream()
.map(node -> node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion())
.orElse(null))
.collect(Collectors.toList());
if (wantedVersions.isEmpty()) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType + ": No nodes to provision");
duperModel.infraApplicationRemoved(application.getApplicationId());
continue;
}
if (wantedVersions.stream().allMatch(targetVersion.get()::equals) &&
duperModel.infraApplicationIsActive(application.getApplicationId())) {
logger.log(LogLevel.DEBUG, "Skipping provision of " + nodeType +
": Already provisioned to target version " + targetVersion);
continue;
}
List<HostSpec> hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
duperModel.infraApplicationActivated(
application.getApplicationId(),
hostSpecs.stream().map(HostSpec::hostname).map(HostName::from).collect(Collectors.toList()));
} catch (RuntimeException e) {
logger.log(LogLevel.INFO, "Failed to activate " + application.getApplicationId(), e);
}
}
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} | class InfrastructureProvisioner extends Maintainer {
private static final Logger logger = Logger.getLogger(InfrastructureProvisioner.class.getName());
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
private final DuperModelInfraApi duperModel;
public InfrastructureProvisioner(Provisioner provisioner, NodeRepository nodeRepository,
InfrastructureVersions infrastructureVersions, Duration interval, JobControl jobControl,
DuperModelInfraApi duperModel) {
super(nodeRepository, interval, jobControl);
this.provisioner = provisioner;
this.infrastructureVersions = infrastructureVersions;
this.duperModel = duperModel;
}
@Override
} |
I assume this method is a NOP when there are no slobroks, except for the log message? | private Register registerInSlobrok(SlobroksConfig slobrokConfig, QrConfig qrConfig) {
SlobrokList slobrokList = new SlobrokList();
slobrokList.setup(slobrokConfig.slobrok().stream().map(SlobroksConfig.Slobrok::connectionspec).toArray(String[]::new));
Spec mySpec = new Spec(HostName.getLocalhost(), qrConfig.rpc().port());
Register slobrokRegistrator = new Register(new Supervisor(new Transport()), slobrokList, mySpec);
slobrokRegistrator.registerName(qrConfig.rpc().slobrokId());
log.log(LogLevel.INFO,
"Registered name '" + qrConfig.rpc().slobrokId() + "' at " + mySpec + " with: " + slobrokList);
return slobrokRegistrator;
} | log.log(LogLevel.INFO, | private Register registerInSlobrok(SlobroksConfig slobrokConfig, QrConfig qrConfig) {
SlobrokList slobrokList = new SlobrokList();
slobrokList.setup(slobrokConfig.slobrok().stream().map(SlobroksConfig.Slobrok::connectionspec).toArray(String[]::new));
Spec mySpec = new Spec(HostName.getLocalhost(), qrConfig.rpc().port());
Register slobrokRegistrator = new Register(new Supervisor(new Transport()), slobrokList, mySpec);
slobrokRegistrator.registerName(qrConfig.rpc().slobrokId());
log.log(LogLevel.INFO,
"Registered name '" + qrConfig.rpc().slobrokId() + "' at " + mySpec + " with: " + slobrokList);
return slobrokRegistrator;
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} |
I assume so too, but why do you ask? I think we only have two cases: Standalone, where the model will not enable rpc (as far as I can tell) and so not make it to this code, and clustered, where there are always slobroks. | private Register registerInSlobrok(SlobroksConfig slobrokConfig, QrConfig qrConfig) {
SlobrokList slobrokList = new SlobrokList();
slobrokList.setup(slobrokConfig.slobrok().stream().map(SlobroksConfig.Slobrok::connectionspec).toArray(String[]::new));
Spec mySpec = new Spec(HostName.getLocalhost(), qrConfig.rpc().port());
Register slobrokRegistrator = new Register(new Supervisor(new Transport()), slobrokList, mySpec);
slobrokRegistrator.registerName(qrConfig.rpc().slobrokId());
log.log(LogLevel.INFO,
"Registered name '" + qrConfig.rpc().slobrokId() + "' at " + mySpec + " with: " + slobrokList);
return slobrokRegistrator;
} | log.log(LogLevel.INFO, | private Register registerInSlobrok(SlobroksConfig slobrokConfig, QrConfig qrConfig) {
SlobrokList slobrokList = new SlobrokList();
slobrokList.setup(slobrokConfig.slobrok().stream().map(SlobroksConfig.Slobrok::connectionspec).toArray(String[]::new));
Spec mySpec = new Spec(HostName.getLocalhost(), qrConfig.rpc().port());
Register slobrokRegistrator = new Register(new Supervisor(new Transport()), slobrokList, mySpec);
slobrokRegistrator.registerName(qrConfig.rpc().slobrokId());
log.log(LogLevel.INFO,
"Registered name '" + qrConfig.rpc().slobrokId() + "' at " + mySpec + " with: " + slobrokList);
return slobrokRegistrator;
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} |
I forgot for a moment that there is a separate StandaloneContainerApplication. | private Register registerInSlobrok(SlobroksConfig slobrokConfig, QrConfig qrConfig) {
SlobrokList slobrokList = new SlobrokList();
slobrokList.setup(slobrokConfig.slobrok().stream().map(SlobroksConfig.Slobrok::connectionspec).toArray(String[]::new));
Spec mySpec = new Spec(HostName.getLocalhost(), qrConfig.rpc().port());
Register slobrokRegistrator = new Register(new Supervisor(new Transport()), slobrokList, mySpec);
slobrokRegistrator.registerName(qrConfig.rpc().slobrokId());
log.log(LogLevel.INFO,
"Registered name '" + qrConfig.rpc().slobrokId() + "' at " + mySpec + " with: " + slobrokList);
return slobrokRegistrator;
} | log.log(LogLevel.INFO, | private Register registerInSlobrok(SlobroksConfig slobrokConfig, QrConfig qrConfig) {
SlobrokList slobrokList = new SlobrokList();
slobrokList.setup(slobrokConfig.slobrok().stream().map(SlobroksConfig.Slobrok::connectionspec).toArray(String[]::new));
Spec mySpec = new Spec(HostName.getLocalhost(), qrConfig.rpc().port());
Register slobrokRegistrator = new Register(new Supervisor(new Transport()), slobrokList, mySpec);
slobrokRegistrator.registerName(qrConfig.rpc().slobrokId());
log.log(LogLevel.INFO,
"Registered name '" + qrConfig.rpc().slobrokId() + "' at " + mySpec + " with: " + slobrokList);
return slobrokRegistrator;
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} |
This will blow up if (when) multiple transports share a crypto engine. | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | try { cryptoEngine.close(); } catch (Exception e) {} | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} |
Correct. No usage like that as of now. Any suggestions on how this should be solved? Add a factory and always provide your own, or always provide it from the outside. | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | try { cryptoEngine.close(); } catch (Exception e) {} | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} |
In C++ we have a get_default instead of 'create', which lets all the default engines be the same shared object (which is configured based on environment variables). Since static stuff is more frowned upon in Java, we might want to pass it in from the outside in order to share it. Even if it is a bit ugly, I think the balance between convenience and forceful correctness is to perform a conditional close based on whether the crypto engine was self-created or passed in. | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | try { cryptoEngine.close(); } catch (Exception e) {} | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} |
When you pass it in from the outside, you can wrap it in a proxy with a no-op close(), and handle the close on the outside. | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | try { cryptoEngine.close(); } catch (Exception e) {} | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} |
It's just wrong to call close on something which you didn't open yourself. | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | try { cryptoEngine.close(); } catch (Exception e) {} | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} |
Currently it is created by self. But there is an api that lets you do both, but that is not used for anything but testing different crypto engines. So this @bjorncs or @havardpe will fix properly when they allow for a single shared cryptoengine. | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | try { cryptoEngine.close(); } catch (Exception e) {} | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
connector.shutdown().waitDone();
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
closer.shutdown().join();
connector.exit().join();
try { cryptoEngine.close(); } catch (Exception e) {}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} |
This is to avoid logging a no-op "initializing" -> "initializing" status change at startup | public void status(Status status) {
if (status != this.status) {
log.log(LogLevel.INFO, "Changing health status code from '" + this.status + "' to '" + status.name() + "'");
this.status = status;
}
} | if (status != this.status) { | public void status(Status status) {
if (status != this.status) {
log.log(LogLevel.INFO, "Changing health status code from '" + this.status + "' to '" + status.name() + "'");
this.status = status;
}
} | class StateMonitor extends AbstractComponent {
private final static Logger log = Logger.getLogger(StateMonitor.class.getName());
public enum Status {up, down, initializing};
private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>();
private final Thread thread;
private final Timer timer;
private final long snapshotIntervalMs;
private volatile long lastSnapshotTimeMs;
private volatile MetricSnapshot snapshot;
private volatile Status status;
private final TreeSet<String> valueNames = new TreeSet<>();
@Inject
public StateMonitor(HealthMonitorConfig config, Timer timer) {
this(config, timer, runnable -> {
Thread thread = new Thread(runnable, "StateMonitor");
thread.setDaemon(true);
return thread;
});
}
StateMonitor(HealthMonitorConfig config, Timer timer, ThreadFactory threadFactory) {
this.timer = timer;
this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1));
this.lastSnapshotTimeMs = timer.currentTimeMillis();
this.status = Status.valueOf(config.initialStatus());
thread = threadFactory.newThread(this::run);
thread.start();
}
/** Returns a metric consumer for jDisc which will write metrics back to this */
public MetricConsumer newMetricConsumer() {
StateMetricConsumer consumer = new StateMetricConsumer();
consumers.add(consumer);
return consumer;
}
public Status status() { return status; }
/** Returns the last snapshot taken of the metrics in this system */
public MetricSnapshot snapshot() {
return snapshot;
}
/** Returns the interval between each metrics snapshot used by this */
public long getSnapshotIntervalMillis() { return snapshotIntervalMs; }
/** NOTE: For unit testing only. May lead to undefined behaviour if StateMonitor thread is running simultaneously **/
boolean checkTime() {
long now = timer.currentTimeMillis();
if (now < lastSnapshotTimeMs + snapshotIntervalMs) {
return false;
}
snapshot = createSnapshot(lastSnapshotTimeMs, now);
lastSnapshotTimeMs = now;
return true;
}
private void run() {
log.finest("StateMonitor started.");
try {
while (!Thread.interrupted()) {
checkTime();
Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis());
}
} catch (InterruptedException e) {
}
log.finest("StateMonitor stopped.");
}
private MetricSnapshot createSnapshot(long fromMillis, long toMillis) {
MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS);
for (StateMetricConsumer consumer : consumers) {
snapshot.add(consumer.createSnapshot());
}
updateNames(snapshot);
return snapshot;
}
private void updateNames(MetricSnapshot current) {
TreeSet<String> seen = new TreeSet<>();
for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) {
for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) {
seen.add(nameAndMetric.getKey());
}
}
synchronized (valueNames) {
for (String name : valueNames) {
if (!seen.contains(name)) {
current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0);
}
}
valueNames.addAll(seen);
}
}
@Override
public void deconstruct() {
thread.interrupt();
try {
thread.join(5000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
if (thread.isAlive()) {
log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring.");
}
}
} | class StateMonitor extends AbstractComponent {
private final static Logger log = Logger.getLogger(StateMonitor.class.getName());
public enum Status {up, down, initializing};
private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>();
private final Thread thread;
private final Timer timer;
private final long snapshotIntervalMs;
private volatile long lastSnapshotTimeMs;
private volatile MetricSnapshot snapshot;
private volatile Status status;
private final TreeSet<String> valueNames = new TreeSet<>();
@Inject
public StateMonitor(HealthMonitorConfig config, Timer timer) {
this(config, timer, runnable -> {
Thread thread = new Thread(runnable, "StateMonitor");
thread.setDaemon(true);
return thread;
});
}
StateMonitor(HealthMonitorConfig config, Timer timer, ThreadFactory threadFactory) {
this.timer = timer;
this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1));
this.lastSnapshotTimeMs = timer.currentTimeMillis();
this.status = Status.valueOf(config.initialStatus());
thread = threadFactory.newThread(this::run);
thread.start();
}
/** Returns a metric consumer for jDisc which will write metrics back to this */
public MetricConsumer newMetricConsumer() {
StateMetricConsumer consumer = new StateMetricConsumer();
consumers.add(consumer);
return consumer;
}
public Status status() { return status; }
/** Returns the last snapshot taken of the metrics in this system */
public MetricSnapshot snapshot() {
return snapshot;
}
/** Returns the interval between each metrics snapshot used by this */
public long getSnapshotIntervalMillis() { return snapshotIntervalMs; }
/** NOTE: For unit testing only. May lead to undefined behaviour if StateMonitor thread is running simultaneously **/
boolean checkTime() {
long now = timer.currentTimeMillis();
if (now < lastSnapshotTimeMs + snapshotIntervalMs) {
return false;
}
snapshot = createSnapshot(lastSnapshotTimeMs, now);
lastSnapshotTimeMs = now;
return true;
}
private void run() {
log.finest("StateMonitor started.");
try {
while (!Thread.interrupted()) {
checkTime();
Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis());
}
} catch (InterruptedException e) {
}
log.finest("StateMonitor stopped.");
}
private MetricSnapshot createSnapshot(long fromMillis, long toMillis) {
MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS);
for (StateMetricConsumer consumer : consumers) {
snapshot.add(consumer.createSnapshot());
}
updateNames(snapshot);
return snapshot;
}
private void updateNames(MetricSnapshot current) {
TreeSet<String> seen = new TreeSet<>();
for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) {
for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) {
seen.add(nameAndMetric.getKey());
}
}
synchronized (valueNames) {
for (String name : valueNames) {
if (!seen.contains(name)) {
current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0);
}
}
valueNames.addAll(seen);
}
}
@Override
public void deconstruct() {
thread.interrupt();
try {
thread.join(5000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
if (thread.isAlive()) {
log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring.");
}
}
} |
s/suits/suites/ in this method | private static void restrictSetOfEnabledCiphers(SSLEngine sslEngine) {
String[] validCipherSuits = Arrays.stream(sslEngine.getSupportedCipherSuites())
.filter(ALLOWED_CIPHER_SUITS::contains)
.toArray(String[]::new);
if (validCipherSuits.length == 0) {
throw new IllegalStateException("None of the allowed cipher suits are supported");
}
log.log(Level.FINE, () -> String.format("Allowed cipher suits that are supported: %s", Arrays.toString(validCipherSuits)));
sslEngine.setEnabledCipherSuites(validCipherSuits);
} | } | private static void restrictSetOfEnabledCiphers(SSLEngine sslEngine) {
String[] validCipherSuites = Arrays.stream(sslEngine.getSupportedCipherSuites())
.filter(ALLOWED_CIPHER_SUITES::contains)
.toArray(String[]::new);
if (validCipherSuites.length == 0) {
throw new IllegalStateException("None of the allowed cipher suites are supported");
}
log.log(Level.FINE, () -> String.format("Allowed cipher suites that are supported: %s", Arrays.toString(validCipherSuites)));
sslEngine.setEnabledCipherSuites(validCipherSuites);
} | class DefaultTlsContext implements TlsContext {
public static final List<String> ALLOWED_CIPHER_SUITS = Arrays.asList(
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384",
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256");
private static final Logger log = Logger.getLogger(DefaultTlsContext.class.getName());
private final SSLContext sslContext;
public DefaultTlsContext(List<X509Certificate> certificates,
PrivateKey privateKey,
List<X509Certificate> caCertificates,
AuthorizedPeers authorizedPeers,
AuthorizationMode mode) {
this.sslContext = createSslContext(certificates, privateKey, caCertificates, authorizedPeers, mode);
}
public DefaultTlsContext(Path tlsOptionsConfigFile, AuthorizationMode mode) {
this.sslContext = createSslContext(tlsOptionsConfigFile, mode);
}
@Override
public SSLEngine createSslEngine() {
SSLEngine sslEngine = sslContext.createSSLEngine();
restrictSetOfEnabledCiphers(sslEngine);
return sslEngine;
}
private static SSLContext createSslContext(List<X509Certificate> certificates,
PrivateKey privateKey,
List<X509Certificate> caCertificates,
AuthorizedPeers authorizedPeers,
AuthorizationMode mode) {
SslContextBuilder builder = new SslContextBuilder();
if (!certificates.isEmpty()) {
builder.withKeyStore(privateKey, certificates);
}
if (!caCertificates.isEmpty()) {
builder.withTrustStore(caCertificates);
}
if (authorizedPeers != null) {
builder.withTrustManagerFactory(new PeerAuthorizerTrustManagersFactory(authorizedPeers, mode));
}
return builder.build();
}
private static SSLContext createSslContext(Path tlsOptionsConfigFile, AuthorizationMode mode) {
TransportSecurityOptions options = TransportSecurityOptions.fromJsonFile(tlsOptionsConfigFile);
SslContextBuilder builder = new SslContextBuilder();
options.getCertificatesFile()
.ifPresent(certificates -> builder.withKeyStore(options.getPrivateKeyFile().get(), certificates));
options.getCaCertificatesFile().ifPresent(builder::withTrustStore);
if (mode != AuthorizationMode.DISABLE) {
options.getAuthorizedPeers().ifPresent(
authorizedPeers -> builder.withTrustManagerFactory(new PeerAuthorizerTrustManagersFactory(authorizedPeers, mode)));
}
return builder.build();
}
} | class DefaultTlsContext implements TlsContext {
public static final List<String> ALLOWED_CIPHER_SUITES = Arrays.asList(
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256");
private static final Logger log = Logger.getLogger(DefaultTlsContext.class.getName());
private final SSLContext sslContext;
public DefaultTlsContext(List<X509Certificate> certificates,
PrivateKey privateKey,
List<X509Certificate> caCertificates,
AuthorizedPeers authorizedPeers,
AuthorizationMode mode) {
this.sslContext = createSslContext(certificates, privateKey, caCertificates, authorizedPeers, mode);
}
public DefaultTlsContext(Path tlsOptionsConfigFile, AuthorizationMode mode) {
this.sslContext = createSslContext(tlsOptionsConfigFile, mode);
}
@Override
public SSLEngine createSslEngine() {
SSLEngine sslEngine = sslContext.createSSLEngine();
restrictSetOfEnabledCiphers(sslEngine);
return sslEngine;
}
private static SSLContext createSslContext(List<X509Certificate> certificates,
PrivateKey privateKey,
List<X509Certificate> caCertificates,
AuthorizedPeers authorizedPeers,
AuthorizationMode mode) {
SslContextBuilder builder = new SslContextBuilder();
if (!certificates.isEmpty()) {
builder.withKeyStore(privateKey, certificates);
}
if (!caCertificates.isEmpty()) {
builder.withTrustStore(caCertificates);
}
if (authorizedPeers != null) {
builder.withTrustManagerFactory(new PeerAuthorizerTrustManagersFactory(authorizedPeers, mode));
}
return builder.build();
}
private static SSLContext createSslContext(Path tlsOptionsConfigFile, AuthorizationMode mode) {
TransportSecurityOptions options = TransportSecurityOptions.fromJsonFile(tlsOptionsConfigFile);
SslContextBuilder builder = new SslContextBuilder();
options.getCertificatesFile()
.ifPresent(certificates -> builder.withKeyStore(options.getPrivateKeyFile().get(), certificates));
options.getCaCertificatesFile().ifPresent(builder::withTrustStore);
if (mode != AuthorizationMode.DISABLE) {
options.getAuthorizedPeers().ifPresent(
authorizedPeers -> builder.withTrustManagerFactory(new PeerAuthorizerTrustManagersFactory(authorizedPeers, mode)));
}
return builder.build();
}
} |
Indentation gone wild 🕺. Can also consider factoring this out into a utility method that generates the nth document string, since it's just the number that differs between them. | private void assertFeedSuccessful(SyncFeedClient feedClient) {
List<SyncOperation> operations = new ArrayList<>();
operations.add(new SyncOperation("id::test::1",
"{" +
" \"put\": \"id::test::1\"," +
" \"fields\": {" +
" \"title\": \"Title 1\"" +
" }" +
"}"));
operations.add(new SyncOperation("id::test::2",
"{" +
" \"put\": \"id::test::2\"," +
" \"fields\": {" +
" \"title\": \"Title 2\"" +
" }" +
"}"));
operations.add(new SyncOperation("id::test::3",
"{" +
" \"put\": \"id::test::3\"," +
" \"fields\": {" +
" \"title\": \"Title 3\"" +
" }" +
"}"));
SyncResult result = feedClient.stream(operations);
assertTrue(result.isSuccess());
assertEquals(3, result.results().size());
assertNull(result.exception());
assertEquals("id::test::1", result.results().get(0).getDocumentId());
assertEquals("id::test::2", result.results().get(1).getDocumentId());
assertEquals("id::test::3", result.results().get(2).getDocumentId());
} | "}")); | private void assertFeedSuccessful(SyncFeedClient feedClient) {
List<SyncOperation> operations = new ArrayList<>();
operations.add(new SyncOperation("id::test::1",
"{" +
" \"put\": \"id::test::1\"," +
" \"fields\": {" +
" \"title\": \"Title 1\"" +
" }" +
"}"));
operations.add(new SyncOperation("id::test::2",
"{" +
" \"put\": \"id::test::2\"," +
" \"fields\": {" +
" \"title\": \"Title 2\"" +
" }" +
"}"));
operations.add(new SyncOperation("id::test::3",
"{" +
" \"put\": \"id::test::3\"," +
" \"fields\": {" +
" \"title\": \"Title 3\"" +
" }" +
"}"));
SyncResult result = feedClient.stream(operations);
assertTrue(result.isSuccess());
assertEquals(3, result.results().size());
assertNull(result.exception());
assertEquals("id::test::1", result.results().get(0).getDocumentId());
assertEquals("id::test::2", result.results().get(1).getDocumentId());
assertEquals("id::test::3", result.results().get(2).getDocumentId());
} | class SyncFeedClientTest {
@Test
public void testFeedJson() {
SessionParams sessionParams = new SessionParams.Builder()
.addCluster(new Cluster.Builder()
.addEndpoint(Endpoint.create("hostname"))
.build())
.setConnectionParams(new ConnectionParams.Builder()
.setDryRun(true)
.build())
.build();
SyncFeedClient feedClient = new SyncFeedClient(sessionParams);
assertFeedSuccessful(feedClient);
assertFeedSuccessful(feedClient);
feedClient.close();
}
} | class SyncFeedClientTest {
@Test
public void testFeedJson() {
SessionParams sessionParams = new SessionParams.Builder()
.addCluster(new Cluster.Builder()
.addEndpoint(Endpoint.create("hostname"))
.build())
.setConnectionParams(new ConnectionParams.Builder()
.setDryRun(true)
.build())
.build();
SyncFeedClient feedClient = new SyncFeedClient(sessionParams);
assertFeedSuccessful(feedClient);
assertFeedSuccessful(feedClient);
feedClient.close();
}
} |
Yes, it's a tradeoff with keeping it really simple though. | private void assertFeedSuccessful(SyncFeedClient feedClient) {
List<SyncOperation> operations = new ArrayList<>();
operations.add(new SyncOperation("id::test::1",
"{" +
" \"put\": \"id::test::1\"," +
" \"fields\": {" +
" \"title\": \"Title 1\"" +
" }" +
"}"));
operations.add(new SyncOperation("id::test::2",
"{" +
" \"put\": \"id::test::2\"," +
" \"fields\": {" +
" \"title\": \"Title 2\"" +
" }" +
"}"));
operations.add(new SyncOperation("id::test::3",
"{" +
" \"put\": \"id::test::3\"," +
" \"fields\": {" +
" \"title\": \"Title 3\"" +
" }" +
"}"));
SyncResult result = feedClient.stream(operations);
assertTrue(result.isSuccess());
assertEquals(3, result.results().size());
assertNull(result.exception());
assertEquals("id::test::1", result.results().get(0).getDocumentId());
assertEquals("id::test::2", result.results().get(1).getDocumentId());
assertEquals("id::test::3", result.results().get(2).getDocumentId());
} | "}")); | private void assertFeedSuccessful(SyncFeedClient feedClient) {
List<SyncOperation> operations = new ArrayList<>();
operations.add(new SyncOperation("id::test::1",
"{" +
" \"put\": \"id::test::1\"," +
" \"fields\": {" +
" \"title\": \"Title 1\"" +
" }" +
"}"));
operations.add(new SyncOperation("id::test::2",
"{" +
" \"put\": \"id::test::2\"," +
" \"fields\": {" +
" \"title\": \"Title 2\"" +
" }" +
"}"));
operations.add(new SyncOperation("id::test::3",
"{" +
" \"put\": \"id::test::3\"," +
" \"fields\": {" +
" \"title\": \"Title 3\"" +
" }" +
"}"));
SyncResult result = feedClient.stream(operations);
assertTrue(result.isSuccess());
assertEquals(3, result.results().size());
assertNull(result.exception());
assertEquals("id::test::1", result.results().get(0).getDocumentId());
assertEquals("id::test::2", result.results().get(1).getDocumentId());
assertEquals("id::test::3", result.results().get(2).getDocumentId());
} | class SyncFeedClientTest {
@Test
public void testFeedJson() {
SessionParams sessionParams = new SessionParams.Builder()
.addCluster(new Cluster.Builder()
.addEndpoint(Endpoint.create("hostname"))
.build())
.setConnectionParams(new ConnectionParams.Builder()
.setDryRun(true)
.build())
.build();
SyncFeedClient feedClient = new SyncFeedClient(sessionParams);
assertFeedSuccessful(feedClient);
assertFeedSuccessful(feedClient);
feedClient.close();
}
} | class SyncFeedClientTest {
@Test
public void testFeedJson() {
SessionParams sessionParams = new SessionParams.Builder()
.addCluster(new Cluster.Builder()
.addEndpoint(Endpoint.create("hostname"))
.build())
.setConnectionParams(new ConnectionParams.Builder()
.setDryRun(true)
.build())
.build();
SyncFeedClient feedClient = new SyncFeedClient(sessionParams);
assertFeedSuccessful(feedClient);
assertFeedSuccessful(feedClient);
feedClient.close();
}
} |
Consider creating two locally scoped suppliers (`Supplier<ApplicationPackage>`), one for each store, and then use those here and below. | public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
try {
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(applicationStore.get(application.id(), version))
: new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
}
catch (RuntimeException e) {
log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
+ (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
: new ApplicationPackage(applicationStore.get(application.id(), version));
}
} | ? new ApplicationPackage(applicationStore.get(application.id(), version)) | public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
try {
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(applicationStore.get(application.id(), version))
: new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
}
catch (RuntimeException e) {
log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
+ (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
: new ApplicationPackage(applicationStore.get(application.id(), version));
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final RotationRepository rotationRepository;
private final ZmsClientFacade zmsClient;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, CuratorDb curator,
AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
NameService nameService, ConfigServer configServer,
ArtifactRepository artifactRepository, ApplicationStore applicationStore,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.curator = curator;
this.zmsClient = new ZmsClientFacade(zmsClientFactory.createZmsClient(), zmsClientFactory.getControllerIdentity());
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.artifactRepository = artifactRepository;
this.applicationStore = applicationStore;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, buildService, clock);
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (Application application : curator.readApplications()) {
lockIfPresent(application.id(), this::store);
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or null if it is not present */
public Optional<Application> get(ApplicationId id) {
return curator.readApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return sort(curator.readApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return sort(curator.readApplications(tenant));
}
public ArtifactRepository artifacts() { return artifactRepository; }
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(ApplicationId id) {
return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
.flatMap(zone -> configServer().nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder()))
.orElse(controller.systemVersion());
}
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
List<String> rotations = new ArrayList<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
rotations.add(endpoint.get());
}
return rotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
Map<String, EndpointStatus> result = new HashMap<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
EndpointStatus status = configServer.getGlobalRotationStatus(deploymentId, endpoint.get());
result.put(endpoint.get(), status);
}
return result;
}
/**
* Global rotations (plural as we can have aliases) map to exactly one service endpoint.
* This method finds that one service endpoint and strips the URI part that
* the routingGenerator is wrapping around the endpoint.
*
* @param deploymentId The deployment to retrieve global service endpoint for
* @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
*/
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
try {
URI uri = new URI(endpoint.getEndpoint());
String serviceEndpoint = uri.getHost();
if (serviceEndpoint == null) {
throw new IOException("Unexpected endpoints returned from the Routing Generator");
}
String canonicalEndpoint = serviceEndpoint
.replaceAll(".vespa.yahooapis.com", "")
.replaceAll(".vespa.oath.cloud", "");
String hostname = endpoint.getHostname();
if (hostname != null) {
if (endpoint.isGlobal()) {
hostToGlobalEndpoint.put(hostname, endpoint);
} else {
hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
}
if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
return Optional.of(hostToCanonicalEndpoint.get(hostname));
}
}
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<OktaAccessToken> token) {
    // Only the 'default' instance may be created explicitly; tester instances are rejected outright.
    if ( ! (id.instance().isDefault()))
        throw new IllegalArgumentException("Only the instance name 'default' is supported at the moment");
    if (id.instance().isTester())
        throw new IllegalArgumentException("'" + id + "' is a tester application!");
    try (Lock lock = lock(id)) {
        // Validate the application name only if no instance of this application exists yet
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Names differing only by '-' vs '_' are treated as duplicates (see dashToUnderscore)
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // For Athenz tenants the application is also registered in Athenz, which requires an Okta token
        if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No Okta Access Token provided");
            zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()), token.get());
        }
        LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
        store(application);
        log.info("Created " + application);
        return application.get();
    }
}
/** Deploys an application without a deployer-supplied application version; delegates with an empty version. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                             Optional<ApplicationPackage> applicationPackageFromDeployer,
                             DeployOptions options) {
    return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options);
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                             Optional<ApplicationPackage> applicationPackageFromDeployer,
                             Optional<ApplicationVersion> applicationVersionFromDeployer,
                             DeployOptions options) {
    if (applicationId.instance().isTester())
        throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
    try (Lock lock = lock(applicationId)) {
        // Create the application on first deployment
        LockedApplication application = get(applicationId)
                .map(app -> new LockedApplication(app, lock))
                .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
        boolean canDeployDirectly = options.deployDirectly || zone.environment().isManuallyDeployed();
        boolean preferOldestVersion = options.deployCurrentVersion;
        // Determine the platform version, application version and package to deploy
        Version platformVersion;
        ApplicationVersion applicationVersion;
        ApplicationPackage applicationPackage;
        if (canDeployDirectly) {
            // Direct deployments use the versions and package supplied by the deployer
            platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
            applicationPackage = applicationPackageFromDeployer.orElseThrow(
                    () -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
        }
        else {
            // Pipeline deployments must correspond to a currently triggered, not-yet-completed job
            JobType jobType = JobType.from(controller.system(), zone);
            Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
            if ( ! job.isPresent()
                 || ! job.get().lastTriggered().isPresent()
                 || job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
                return unexpectedDeployment(applicationId, zone);
            JobRun triggered = job.get().lastTriggered().get();
            // With deployCurrentVersion set, deploy the versions the change was made FROM, where known
            platformVersion = preferOldestVersion
                    ? triggered.sourcePlatform().orElse(triggered.platform())
                    : triggered.platform();
            applicationVersion = preferOldestVersion
                    ? triggered.sourceApplication().orElse(triggered.application())
                    : triggered.application();
            applicationPackage = getApplicationPackage(application.get(), applicationVersion);
            validateRun(application.get(), zone, platformVersion, applicationVersion);
        }
        verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage);
        // Update stored application config from the package, unless deploying an old version
        if ( ! preferOldestVersion && ! application.get().deploymentJobs().deployedInternally())
            application = storeWithUpdatedConfig(application, applicationPackage);
        // Assign a global rotation if the application is eligible
        application = withRotation(application, zone);
        Set<String> rotationNames = new HashSet<>();
        Set<String> cnames = new HashSet<>();
        Application app = application.get();
        app.globalDnsName(controller.system()).ifPresent(applicationRotation -> {
            rotationNames.add(app.rotation().orElseThrow(() -> new RuntimeException("Global Dns assigned, but no rotation id present")).asString());
            cnames.add(applicationRotation.dnsName());
            cnames.add(applicationRotation.secureDnsName());
            cnames.add(applicationRotation.oathDnsName());
        });
        // Carry out the deployment and record it on the application
        options = withVersion(platformVersion, options);
        ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, cnames);
        application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
        store(application);
        return result;
    }
}
/** Fetches the requested application package from the artifact store(s). */
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
    // Reject deployment specs referencing zones unknown to this system before persisting anything.
    validate(applicationPackage.deploymentSpec());
    LockedApplication updated = application.with(applicationPackage.deploymentSpec())
                                           .with(applicationPackage.validationOverrides());
    // Clean up state no longer referenced by the new deployment spec.
    updated = withoutDeletedDeployments(updated);
    updated = withoutUnreferencedDeploymentJobs(updated);
    store(updated);
    return updated;
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        // Deploy the system application's stored package, pinned to the given version
        ApplicationPackage applicationPackage = new ApplicationPackage(
                artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
        );
        DeployOptions options = withVersion(version, DeployOptions.none());
        deploy(application.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
    } else {
        // No package: upgrade the application's node types directly through the node repository
        application.nodeTypes().forEach(nodeType -> configServer().nodeRepository().upgrade(zone, nodeType, version));
    }
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
    // Tester deployments never carry rotations or CNAMEs
    return deploy(tester.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
}
/** Prepares and activates the given package through the config server, returning the prepare log and package size. */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                              ZoneId zone, DeployOptions deployOptions,
                              Set<String> rotationNames, Set<String> cnames) {
    DeploymentId deploymentId = new DeploymentId(application, zone);
    ConfigServer.PreparedApplication preparedApplication =
            configServer.deploy(deploymentId, deployOptions, cnames, rotationNames,
                                applicationPackage.zippedContent());
    return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                              applicationPackage.zippedContent().length);
}
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
    // Rotations are only assigned for production deployments of applications declaring a global service id
    if (zone.environment() == Environment.prod && application.get().deploymentSpec().globalServiceId().isPresent()) {
        try (RotationLock rotationLock = rotationRepository.lock()) {
            Rotation rotation = rotationRepository.getOrAssignRotation(application.get(), rotationLock);
            application = application.with(rotation.id());
            // Persist the assignment before registering DNS records for it
            store(application);
            registerRotationInDns(rotation, application.get().globalDnsName(controller.system()).get().dnsName());
            registerRotationInDns(rotation, application.get().globalDnsName(controller.system()).get().secureDnsName());
            registerRotationInDns(rotation, application.get().globalDnsName(controller.system()).get().oathDnsName());
        }
    }
    return application;
}
/** Builds a synthetic warning result for a deployment that no triggered job currently expects. */
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
    Log logEntry = new Log();
    logEntry.level = "WARNING";
    logEntry.time = clock.instant().toEpochMilli();
    logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
                       " as a deployment is not currently expected";
    PrepareResponse prepareResponse = new PrepareResponse();
    prepareResponse.log = Collections.singletonList(logEntry);
    prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
    return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
/** Deactivates production deployments removed from the deployment spec, if the removal override allows it. */
private LockedApplication withoutDeletedDeployments(LockedApplication application) {
    List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
            .filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
                                                                                Optional.of(deployment.zone().region())))
            .collect(Collectors.toList());
    if (deploymentsToRemove.isEmpty()) return application;
    // Removing a production deployment is destructive, so it must be explicitly allowed
    if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(deployment -> deployment.zone().region().value())
                                                              .collect(Collectors.joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml. " +
                                           ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
    LockedApplication applicationWithRemoval = application;
    for (Deployment deployment : deploymentsToRemove)
        applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
    return applicationWithRemoval;
}
/** Removes job status for production zones no longer declared in the deployment spec. */
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
    LockedApplication pruned = application;
    for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
        ZoneId zone = job.zone(controller.system());
        boolean stillDeclared = pruned.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region()));
        if ( ! stillDeclared)
            pruned = pruned.withoutDeploymentJob(job);
    }
    return pruned;
}
/** Returns a copy of the given options with the Vespa version pinned to the given version. */
private DeployOptions withVersion(Version version, DeployOptions options) {
    Optional<Version> pinnedVersion = Optional.of(version);
    return new DeployOptions(options.deployDirectly, pinnedVersion, options.ignoreValidationErrors, options.deployCurrentVersion);
}
/** Register a DNS name for rotation */
private void registerRotationInDns(Rotation rotation, String dnsName) {
    try {
        RecordData rotationName = RecordData.fqdn(rotation.name());
        List<Record> records = nameService.findRecords(Record.Type.CNAME, RecordName.from(dnsName));
        records.forEach(record -> {
            // Update any existing CNAME that does not already point at this rotation
            if ( ! record.data().equals(rotationName)) {
                nameService.updateRecord(record.id(), rotationName);
                log.info("Updated mapping for record ID " + record.id().asString() + ": '" + dnsName
                         + "' -> '" + rotation.name() + "'");
            }
        });
        if (records.isEmpty()) {
            RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
            log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
                     + rotation.name() + "'");
        }
    } catch (RuntimeException e) {
        // DNS registration is best-effort here: failures are logged, not propagated
        log.log(Level.WARNING, "Failed to register CNAME", e);
    }
}
/** Returns the endpoints of the deployment, or an empty list if the request fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
    // Tester instances are not stored with deployments, so they are accepted without a known deployment
    if ( ! get(deploymentId.applicationId())
            .map(application -> application.deployments().containsKey(deploymentId.zoneId()))
            .orElse(deploymentId.applicationId().instance().isTester()))
        throw new NotExistsException("Deployment", deploymentId.toString());
    try {
        return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
                                                                .map(RoutingEndpoint::getEndpoint)
                                                                .map(URI::create)
                                                                .iterator()));
    }
    catch (RuntimeException e) {
        // Routing info is best-effort: log and return empty rather than failing the caller
        log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
                               + Exceptions.toMessageString(e));
        return Optional.empty();
    }
}
/**
* Deletes the the given application. All known instances of the applications will be deleted,
* including PR instances.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if no instances of the application exist
*/
public void deleteApplication(ApplicationId applicationId, Optional<OktaAccessToken> token) {
    // Collect every instance of this application under the tenant, including PR instances
    List<ApplicationId> instances = asList(applicationId.tenant()).stream()
            .map(Application::id)
            .filter(id -> id.application().equals(applicationId.application()))
            .collect(Collectors.toList());
    if (instances.isEmpty()) {
        throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
    }
    instances.forEach(id -> lockOrThrow(id, application -> {
        if ( ! application.get().deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
        Tenant tenant = controller.tenants().tenant(id.tenant()).get();
        if (tenant instanceof AthenzTenant && ! token.isPresent())
            throw new IllegalArgumentException("Could not delete '" + application + "': No Okta Access Token provided");
        // Athenz registration only exists for the default instance of Athenz tenants
        if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
            zmsClient.deleteApplication(((AthenzTenant) tenant).domain(),
                                        new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()), token.get());
        }
        curator.removeApplication(id);
        applicationStore.removeAll(id);
        applicationStore.removeAll(TesterId.of(id)); // also clear any stored tester packages
        log.info("Deleted " + application);
    }));
}
/**
* Replace any previous version of this application by this instance
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
    // The LockedApplication argument guarantees the caller holds this application's lock
    curator.writeApplication(application.get());
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        // Silently does nothing if the application does not exist
        get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
    }
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        // require throws IllegalArgumentException if the application does not exist
        action.accept(new LockedApplication(require(applicationId), lock));
    }
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param hostname If non-empty, restart will only be scheduled for this host
*/
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
    // Delegates directly to the config server; an empty hostname restarts all nodes of the deployment
    configServer.restart(deploymentId, hostname);
}
/**
* Asks the config server whether this deployment is currently <i>suspended</i>:
* Not in a state where it should receive traffic.
*/
public boolean isSuspended(DeploymentId deploymentId) {
    try {
        return configServer.isSuspended(deploymentId);
    }
    catch (ConfigServerException e) {
        // An unknown deployment is reported as not suspended rather than as an error
        if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
            return false;
        throw e;
    }
}
/** Deactivate application in the given zone */
public void deactivate(ApplicationId application, ZoneId zone) {
    // Locks, deactivates and persists in one step; throws if the application does not exist
    lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
* Deactivates a locked application without storing it
*
* @return the application with the deployment in the given zone removed
*/
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
    try {
        configServer.deactivate(new DeploymentId(application.get().id(), zone));
    }
    catch (NoInstanceException ignored) {
        // Already inactive on the config server; still remove our record of the deployment below
    }
    return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger owned by this controller. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/** Returns an id whose application name has every '-' replaced by '_', used for duplicate-name detection. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    // String.replace substitutes literally; replaceAll would compile a regex on every call for no benefit.
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replace("-", "_"),
                              id.instance().value());
}
/** Returns the config server client used by this controller. */
public ConfigServer configServer() { return configServer; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application need to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(ApplicationId application) {
    // Curator-backed (distributed) lock per application id
    return curator.lock(application);
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
    // Result is discarded; called for its side effect of resolving (and thereby checking) all step jobs
    new DeploymentSteps(deploymentSpec, controller::system).jobs();
    deploymentSpec.zones().stream()
            .filter(zone -> zone.environment() == Environment.prod)
            .forEach(zone -> {
                if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
                                                                    zone.region().orElse(null)))) {
                    throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
                }
            });
}
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
    Deployment deployment = application.deployments().get(zone);
    // Only production zones with an existing deployment are protected against downgrades
    if ( zone.environment().isProduction() && deployment != null
         && ( platformVersion.compareTo(deployment.version()) < 0
              || applicationVersion.compareTo(deployment.applicationVersion()) < 0))
        throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                         " are older than the currently deployed (platform: %s, application: %s).",
                                                         application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns the rotation repository owned by this controller. */
public RotationRepository rotationRepository() {
    return rotationRepository;
}
/** Sort given list of applications by application ID */
/** Returns a new list of the given applications, ordered by application id. */
private static List<Application> sort(List<Application> applications) {
    List<Application> ordered = new ArrayList<>(applications);
    ordered.sort(Comparator.comparing(Application::id));
    return ordered;
}
/** Verifies that an Athenz domain declared in deployment.xml matches the tenant's own Athenz domain. */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage) {
    // Only relevant when deployment.xml declares an Athenz domain
    applicationPackage.deploymentSpec().athenzDomain()
            .ifPresent(identityDomain -> {
                Optional<Tenant> tenant = controller.tenants().tenant(tenantName);
                if(!tenant.isPresent()) {
                    throw new IllegalArgumentException("Tenant does not exist");
                } else {
                    // Non-Athenz (personal) tenants cannot launch Athenz services at all
                    AthenzDomain tenantDomain = tenant.filter(t -> t instanceof AthenzTenant)
                            .map(t -> (AthenzTenant) t)
                            .orElseThrow(() -> new IllegalArgumentException(
                                    String.format("Athenz domain defined in deployment.xml, but no Athenz domain for tenant (%s). " +
                                                  "It is currently not possible to launch Athenz services from personal tenants, use " +
                                                  "Athenz tenant instead.",
                                                  tenantName.value())))
                            .domain();
                    if (!Objects.equals(tenantDomain.getName(), identityDomain.value()))
                        throw new IllegalArgumentException(String.format("Athenz domain in deployment.xml: [%s] must match tenant domain: [%s]",
                                                                         identityDomain.value(),
                                                                         tenantDomain.getName()));
                }
            });
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final RotationRepository rotationRepository;
/** Facade used to register and delete applications in Athenz */
private final ZmsClientFacade zmsClient;
/** Used to create and update rotation CNAME records */
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
/** Injected clock, so time is controllable in tests */
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, CuratorDb curator,
                      AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                      NameService nameService, ConfigServer configServer,
                      ArtifactRepository artifactRepository, ApplicationStore applicationStore,
                      RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
    this.controller = controller;
    this.curator = curator;
    this.zmsClient = new ZmsClientFacade(zmsClientFactory.createZmsClient(), zmsClientFactory.getControllerIdentity());
    this.nameService = nameService;
    this.configServer = configServer;
    this.routingGenerator = routingGenerator;
    this.clock = clock;
    this.artifactRepository = artifactRepository;
    this.applicationStore = applicationStore;
    this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
    this.deploymentTrigger = new DeploymentTrigger(controller, buildService, clock);
    // NOTE(review): re-writing every application once shortly after start looks like a
    // stored-format migration step — confirm before removing.
    Once.after(Duration.ofMinutes(1), () -> {
        Instant start = clock.instant();
        int count = 0;
        for (Application application : curator.readApplications()) {
            lockIfPresent(application.id(), this::store);
            count++;
        }
        log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
                                          Duration.between(start, clock.instant())));
    });
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> get(ApplicationId id) {
    return curator.readApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
    // Like get, but absence is an error
    return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
    // Sorted by application id so listings are deterministic
    return sort(curator.readApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
    // Sorted by application id so listings are deterministic
    return sort(curator.readApplications(tenant));
}
/** Returns the artifact repository used by this controller. */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the application store used by this controller. */
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(ApplicationId id) {
    // Falls back to the system version when the application has no nodes with a known version
    return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
                                                     .flatMap(zone -> configServer().nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
                                                     .map(Node::currentVersion)
                                                     .filter(version -> ! version.isEmpty())
                                                     .min(naturalOrder()))
                  .orElse(controller.systemVersion());
}
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
    // At most one canonical endpoint exists per deployment; no endpoint means nothing to alter
    List<String> rotations = new ArrayList<>();
    Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
    if (endpoint.isPresent()) {
        configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
        rotations.add(endpoint.get());
    }
    return rotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
    // A deployment maps to at most one canonical global endpoint; absent means no global rotation.
    Map<String, EndpointStatus> statusByEndpoint = new HashMap<>();
    Optional<String> canonicalEndpoint = getCanonicalGlobalEndpoint(deploymentId);
    if ( ! canonicalEndpoint.isPresent())
        return statusByEndpoint;
    String endpointName = canonicalEndpoint.get();
    statusByEndpoint.put(endpointName, configServer.getGlobalRotationStatus(deploymentId, endpointName));
    return statusByEndpoint;
}
/**
* Global rotations (plural as we can have aliases) map to exactly one service endpoint.
* This method finds that one service endpoint and strips the URI part that
* the routingGenerator is wrapping around the endpoint.
*
* @param deploymentId The deployment to retrieve global service endpoint for
* @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
*/
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
    Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
    Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
        try {
            URI uri = new URI(endpoint.getEndpoint());
            String serviceEndpoint = uri.getHost();
            if (serviceEndpoint == null) {
                throw new IOException("Unexpected endpoints returned from the Routing Generator");
            }
            // Strip the wrapping suffixes. Use replace (literal) instead of replaceAll (regex):
            // with replaceAll the unescaped dots match ANY character, so e.g. a host containing
            // "xvespaXyahooapisYcom" would also have been stripped.
            String canonicalEndpoint = serviceEndpoint
                    .replace(".vespa.yahooapis.com", "")
                    .replace(".vespa.oath.cloud", "");
            String hostname = endpoint.getHostname();
            if (hostname != null) {
                // Track both kinds of endpoint per host; the canonical endpoint is returned once
                // the same host is seen with a global endpoint as well.
                if (endpoint.isGlobal()) {
                    hostToGlobalEndpoint.put(hostname, endpoint);
                } else {
                    hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                }
                if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                    return Optional.of(hostToCanonicalEndpoint.get(hostname));
                }
            }
        } catch (URISyntaxException use) {
            // Preserve the cause when translating to the method's declared exception type
            throw new IOException(use);
        }
    }
    return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<OktaAccessToken> token) {
    // Only the 'default' instance may be created explicitly; tester instances are rejected outright.
    if ( ! (id.instance().isDefault()))
        throw new IllegalArgumentException("Only the instance name 'default' is supported at the moment");
    if (id.instance().isTester())
        throw new IllegalArgumentException("'" + id + "' is a tester application!");
    try (Lock lock = lock(id)) {
        // Validate the application name only if no instance of this application exists yet
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Names differing only by '-' vs '_' are treated as duplicates (see dashToUnderscore)
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // For Athenz tenants the application is also registered in Athenz, which requires an Okta token
        if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No Okta Access Token provided");
            zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()), token.get());
        }
        LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
        store(application);
        log.info("Created " + application);
        return application.get();
    }
}
/** Deploys an application without a deployer-supplied application version; delegates with an empty version. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                             Optional<ApplicationPackage> applicationPackageFromDeployer,
                             DeployOptions options) {
    return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options);
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                             Optional<ApplicationPackage> applicationPackageFromDeployer,
                             Optional<ApplicationVersion> applicationVersionFromDeployer,
                             DeployOptions options) {
    if (applicationId.instance().isTester())
        throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
    try (Lock lock = lock(applicationId)) {
        // Create the application on first deployment
        LockedApplication application = get(applicationId)
                .map(app -> new LockedApplication(app, lock))
                .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
        boolean canDeployDirectly = options.deployDirectly || zone.environment().isManuallyDeployed();
        boolean preferOldestVersion = options.deployCurrentVersion;
        // Determine the platform version, application version and package to deploy
        Version platformVersion;
        ApplicationVersion applicationVersion;
        ApplicationPackage applicationPackage;
        if (canDeployDirectly) {
            // Direct deployments use the versions and package supplied by the deployer
            platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
            applicationPackage = applicationPackageFromDeployer.orElseThrow(
                    () -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
        }
        else {
            // Pipeline deployments must correspond to a currently triggered, not-yet-completed job
            JobType jobType = JobType.from(controller.system(), zone);
            Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
            if ( ! job.isPresent()
                 || ! job.get().lastTriggered().isPresent()
                 || job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
                return unexpectedDeployment(applicationId, zone);
            JobRun triggered = job.get().lastTriggered().get();
            // With deployCurrentVersion set, deploy the versions the change was made FROM, where known
            platformVersion = preferOldestVersion
                    ? triggered.sourcePlatform().orElse(triggered.platform())
                    : triggered.platform();
            applicationVersion = preferOldestVersion
                    ? triggered.sourceApplication().orElse(triggered.application())
                    : triggered.application();
            applicationPackage = getApplicationPackage(application.get(), applicationVersion);
            validateRun(application.get(), zone, platformVersion, applicationVersion);
        }
        verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage);
        // Update stored application config from the package, unless deploying an old version
        if ( ! preferOldestVersion && ! application.get().deploymentJobs().deployedInternally())
            application = storeWithUpdatedConfig(application, applicationPackage);
        // Assign a global rotation if the application is eligible
        application = withRotation(application, zone);
        Set<String> rotationNames = new HashSet<>();
        Set<String> cnames = new HashSet<>();
        Application app = application.get();
        app.globalDnsName(controller.system()).ifPresent(applicationRotation -> {
            rotationNames.add(app.rotation().orElseThrow(() -> new RuntimeException("Global Dns assigned, but no rotation id present")).asString());
            cnames.add(applicationRotation.dnsName());
            cnames.add(applicationRotation.secureDnsName());
            cnames.add(applicationRotation.oathDnsName());
        });
        // Carry out the deployment and record it on the application
        options = withVersion(platformVersion, options);
        ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, cnames);
        application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
        store(application);
        return result;
    }
}
/** Fetches the requested application package from the artifact store(s). */
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
    // Reject deployment specs referencing zones unknown to this system before persisting anything.
    validate(applicationPackage.deploymentSpec());
    LockedApplication updated = application.with(applicationPackage.deploymentSpec())
                                           .with(applicationPackage.validationOverrides());
    // Clean up state no longer referenced by the new deployment spec.
    updated = withoutDeletedDeployments(updated);
    updated = withoutUnreferencedDeploymentJobs(updated);
    store(updated);
    return updated;
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        // Deploy the system application's stored package, pinned to the given version
        ApplicationPackage applicationPackage = new ApplicationPackage(
                artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
        );
        DeployOptions options = withVersion(version, DeployOptions.none());
        deploy(application.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
    } else {
        // No package: upgrade the application's node types directly through the node repository
        application.nodeTypes().forEach(nodeType -> configServer().nodeRepository().upgrade(zone, nodeType, version));
    }
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
return deploy(tester.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
}
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, DeployOptions deployOptions,
Set<String> rotationNames, Set<String> cnames) {
DeploymentId deploymentId = new DeploymentId(application, zone);
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(deploymentId, deployOptions, cnames, rotationNames,
applicationPackage.zippedContent());
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
    // Rotations apply only to production deployments of applications declaring a global service id.
    if (zone.environment() == Environment.prod && application.get().deploymentSpec().globalServiceId().isPresent()) {
        try (RotationLock rotationLock = rotationRepository.lock()) {
            Rotation rotation = rotationRepository.getOrAssignRotation(application.get(), rotationLock);
            application = application.with(rotation.id());
            // Store before DNS registration so a DNS failure does not lose the assignment.
            store(application);
            // Register all DNS aliases of the rotation. Resolve the global DNS name once,
            // instead of re-resolving (and unchecked-get()ing) it for each alias.
            application.get().globalDnsName(controller.system()).ifPresent(globalDnsName -> {
                registerRotationInDns(rotation, globalDnsName.dnsName());
                registerRotationInDns(rotation, globalDnsName.secureDnsName());
                registerRotationInDns(rotation, globalDnsName.oathDnsName());
            });
        }
    }
    return application;
}
/** Builds a no-op activation result whose log explains that this deployment was ignored. */
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
    Log warning = new Log();
    warning.level = "WARNING";
    warning.time = clock.instant().toEpochMilli();
    warning.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
                      " as a deployment is not currently expected";
    PrepareResponse response = new PrepareResponse();
    response.log = Collections.singletonList(warning);
    response.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
    return new ActivateResult(new RevisionId("0"), response, 0);
}
// Removes production deployments that the deployment spec no longer includes.
// Removal requires an explicit 'deployment-removal' validation override; otherwise this throws.
private LockedApplication withoutDeletedDeployments(LockedApplication application) {
List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
.filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
// Guard: deleting live production deployments is destructive, so demand an explicit override.
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
// Deactivate each removed zone, threading the updated application through each step.
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
/** Removes production deployment jobs for zones that the deployment spec no longer includes. */
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
    LockedApplication result = application;
    for (JobType job : JobList.from(result.get()).production().mapToList(JobStatus::type)) {
        ZoneId zone = job.zone(controller.system());
        boolean stillReferenced = result.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region()));
        if ( ! stillReferenced)
            result = result.withoutDeploymentJob(job);
    }
    return result;
}
/** Returns a copy of the given options with the platform version pinned to the given version. */
private DeployOptions withVersion(Version version, DeployOptions options) {
    return new DeployOptions(options.deployDirectly, Optional.of(version),
                             options.ignoreValidationErrors, options.deployCurrentVersion);
}
/** Register a DNS name for rotation */
// Best effort: creates (or corrects) a CNAME from dnsName to the rotation's name.
// Any failure is logged and swallowed, so DNS trouble does not fail the deployment.
private void registerRotationInDns(Rotation rotation, String dnsName) {
try {
RecordData rotationName = RecordData.fqdn(rotation.name());
List<Record> records = nameService.findRecords(Record.Type.CNAME, RecordName.from(dnsName));
// Repair any existing CNAME records that point to the wrong target.
records.forEach(record -> {
if ( ! record.data().equals(rotationName)) {
nameService.updateRecord(record.id(), rotationName);
log.info("Updated mapping for record ID " + record.id().asString() + ": '" + dnsName
+ "' -> '" + rotation.name() + "'");
}
});
// No record at all: create the CNAME from scratch.
if (records.isEmpty()) {
RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
+ rotation.name() + "'");
}
} catch (RuntimeException e) {
// Deliberately non-fatal: deployment proceeds even if DNS registration fails.
log.log(Level.WARNING, "Failed to register CNAME", e);
}
}
/** Returns the endpoints of the deployment, or an empty list if the request fails */
// Throws NotExistsException if the deployment is unknown (tester instances are exempt from
// the existence check, as they are not recorded as deployments on the application).
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
if ( ! get(deploymentId.applicationId())
.map(application -> application.deployments().containsKey(deploymentId.zoneId()))
.orElse(deploymentId.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", deploymentId.toString());
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::getEndpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
// Routing layer failures are downgraded to 'unknown' rather than propagated.
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
/**
* Deletes the given application. All known instances of the applications will be deleted,
* including PR instances.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if no instances of the application exist
*/
public void deleteApplication(ApplicationId applicationId, Optional<OktaAccessToken> token) {
// Collect every instance of this application under the tenant (default, PR, etc.).
List<ApplicationId> instances = asList(applicationId.tenant()).stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationId.application()))
.collect(Collectors.toList());
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
instances.forEach(id -> lockOrThrow(id, application -> {
// Refuse deletion while any deployment is still active.
if ( ! application.get().deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
Tenant tenant = controller.tenants().tenant(id.tenant()).get();
// Athenz-backed tenants require an Okta token to deregister the application in ZMS.
if (tenant instanceof AthenzTenant && ! token.isPresent())
throw new IllegalArgumentException("Could not delete '" + application + "': No Okta Access Token provided");
// Only the default instance is registered in ZMS, so only that one is deregistered.
if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
zmsClient.deleteApplication(((AthenzTenant) tenant).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()), token.get());
}
// Remove persisted state and stored packages (including the tester's).
curator.removeApplication(id);
applicationStore.removeAll(id);
applicationStore.removeAll(TesterId.of(id));
log.info("Deleted " + application);
}));
}
/**
* Replace any previous version of this application by this instance
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
// The lock is released when the try block exits, after the action has run.
get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
// require() throws IllegalArgumentException if the application does not exist.
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param hostname If non-empty, restart will only be scheduled for this host
*/
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
configServer.restart(deploymentId, hostname);
}
/**
* Asks the config server whether this deployment is currently <i>suspended</i>:
* Not in a state where it should receive traffic.
*/
public boolean isSuspended(DeploymentId deploymentId) {
try {
return configServer.isSuspended(deploymentId);
}
catch (ConfigServerException e) {
// A deployment unknown to the config server is treated as not suspended.
if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
return false;
throw e;
}
}
/** Deactivate application in the given zone */
public void deactivate(ApplicationId application, ZoneId zone) {
// Locks, deactivates, and persists the updated application in one step.
lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
* Deactivates a locked application without storing it
*
* @return the application with the deployment in the given zone removed
*/
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.get().id(), zone));
}
catch (NoInstanceException ignored) {
// Nothing deployed in the zone — already deactivated, so this is fine to ignore.
}
return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger owned by this controller. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/** Returns the given id with dashes in the application name replaced by underscores. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return ApplicationId.from(id.tenant().value(),
                              // replace(char, char), not replaceAll: this is a literal
                              // character substitution, not a regex, and avoids regex cost
                              id.application().value().replace('-', '_'),
                              id.instance().value());
}
/** Returns the config server client used by this controller. */
public ConfigServer configServer() { return configServer; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application need to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(ApplicationId application) {
return curator.lock(application);
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
// Called for its side effect only: constructing the steps and resolving their jobs
// presumably throws on an invalid job setup — result intentionally discarded (TODO confirm).
new DeploymentSteps(deploymentSpec, controller::system).jobs();
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
// Only production deployments are protected; a missing deployment means nothing to downgrade.
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns the rotation repository owned by this controller. */
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
/** Verifies that the Athenz domain in deployment.xml, if any, matches the domain of the owning tenant. */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage) {
    applicationPackage.deploymentSpec().athenzDomain().ifPresent(identityDomain -> {
        Optional<Tenant> tenant = controller.tenants().tenant(tenantName);
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Tenant does not exist");
        // Only Athenz-backed tenants may declare an identity domain.
        AthenzDomain tenantDomain = tenant.filter(t -> t instanceof AthenzTenant)
                                          .map(t -> (AthenzTenant) t)
                                          .orElseThrow(() -> new IllegalArgumentException(
                                                  String.format("Athenz domain defined in deployment.xml, but no Athenz domain for tenant (%s). " +
                                                                "It is currently not possible to launch Athenz services from personal tenants, use " +
                                                                "Athenz tenant instead.",
                                                                tenantName.value())))
                                          .domain();
        if ( ! Objects.equals(tenantDomain.getName(), identityDomain.value()))
            throw new IllegalArgumentException(String.format("Athenz domain in deployment.xml: [%s] must match tenant domain: [%s]",
                                                             identityDomain.value(),
                                                             tenantDomain.getName()));
    });
}
} |
This kludge will be gone soon, and I'll clean up then :) | public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
try {
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(applicationStore.get(application.id(), version))
: new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
}
catch (RuntimeException e) {
log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
+ (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
: new ApplicationPackage(applicationStore.get(application.id(), version));
}
} | ? new ApplicationPackage(applicationStore.get(application.id(), version)) | public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
try {
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(applicationStore.get(application.id(), version))
: new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
}
catch (RuntimeException e) {
log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
+ (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
: new ApplicationPackage(applicationStore.get(application.id(), version));
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final RotationRepository rotationRepository;
private final ZmsClientFacade zmsClient;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, CuratorDb curator,
AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
NameService nameService, ConfigServer configServer,
ArtifactRepository artifactRepository, ApplicationStore applicationStore,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.curator = curator;
this.zmsClient = new ZmsClientFacade(zmsClientFactory.createZmsClient(), zmsClientFactory.getControllerIdentity());
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.artifactRepository = artifactRepository;
this.applicationStore = applicationStore;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, buildService, clock);
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (Application application : curator.readApplications()) {
lockIfPresent(application.id(), this::store);
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> get(ApplicationId id) {
return curator.readApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return sort(curator.readApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return sort(curator.readApplications(tenant));
}
/** Returns the artifact repository used by this controller. */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the application package store used by this controller. */
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(ApplicationId id) {
// Falls back to the system version for unknown applications or those without installed versions.
return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
.flatMap(zone -> configServer().nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder()))
.orElse(controller.systemVersion());
}
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
// There is at most one canonical global endpoint, so the list has zero or one element.
List<String> rotations = new ArrayList<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
rotations.add(endpoint.get());
}
return rotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
// At most one entry: the canonical global endpoint, if the deployment has one.
Map<String, EndpointStatus> result = new HashMap<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
EndpointStatus status = configServer.getGlobalRotationStatus(deploymentId, endpoint.get());
result.put(endpoint.get(), status);
}
return result;
}
/**
* Global rotations (plural as we can have aliases) map to exactly one service endpoint.
* This method finds that one service endpoint and strips the URI part that
* the routingGenerator is wrapping around the endpoint.
*
* @param deploymentId The deployment to retrieve global service endpoint for
* @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
*/
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
// A host is the answer when it appears both behind a global endpoint and a non-global
// (canonical) endpoint: the canonical name of that host is the global service endpoint.
Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
try {
URI uri = new URI(endpoint.getEndpoint());
String serviceEndpoint = uri.getHost();
if (serviceEndpoint == null) {
throw new IOException("Unexpected endpoints returned from the Routing Generator");
}
// Strip the routing-layer domain suffixes to get the bare service endpoint.
String canonicalEndpoint = serviceEndpoint
.replaceAll(".vespa.yahooapis.com", "")
.replaceAll(".vespa.oath.cloud", "");
String hostname = endpoint.getHostname();
if (hostname != null) {
if (endpoint.isGlobal()) {
hostToGlobalEndpoint.put(hostname, endpoint);
} else {
hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
}
// Return as soon as one host is seen from both sides.
if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
return Optional.of(hostToCanonicalEndpoint.get(hostname));
}
}
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<OktaAccessToken> token) {
if ( ! (id.instance().isDefault()))
throw new IllegalArgumentException("Only the instance name 'default' is supported at the moment");
if (id.instance().isTester())
throw new IllegalArgumentException("'" + id + "' is a tester application!");
try (Lock lock = lock(id)) {
// Validate the application name only when no sibling instance already uses it.
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
if ( ! tenant.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Names differing only in dash vs underscore would collide; reject those too.
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
// Athenz-backed tenants must register the application in ZMS, which requires an Okta token.
if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
if ( ! token.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': No Okta Access Token provided");
zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()), token.get());
}
LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
store(application);
log.info("Created " + application);
return application.get();
}
}
// Convenience overload without an explicit application version.
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options);
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
Optional<ApplicationVersion> applicationVersionFromDeployer,
DeployOptions options) {
if (applicationId.instance().isTester())
throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
try (Lock lock = lock(applicationId)) {
// Create the application on first deployment.
LockedApplication application = get(applicationId)
.map(app -> new LockedApplication(app, lock))
.orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
boolean canDeployDirectly = options.deployDirectly || zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
if (canDeployDirectly) {
// Direct deployment: versions and package come from the deployer.
platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
}
else {
// Pipeline deployment: versions come from the currently triggered job run.
JobType jobType = JobType.from(controller.system(), zone);
Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
// Ignore the deployment unless the corresponding job was triggered and has not yet completed.
if ( ! job.isPresent()
|| ! job.get().lastTriggered().isPresent()
|| job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
return unexpectedDeployment(applicationId, zone);
JobRun triggered = job.get().lastTriggered().get();
platformVersion = preferOldestVersion
? triggered.sourcePlatform().orElse(triggered.platform())
: triggered.platform();
applicationVersion = preferOldestVersion
? triggered.sourceApplication().orElse(triggered.application())
: triggered.application();
applicationPackage = getApplicationPackage(application.get(), applicationVersion);
validateRun(application.get(), zone, platformVersion, applicationVersion);
}
verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage);
// Update the stored config from the package, unless this is a rollback or an internally built run.
if ( ! preferOldestVersion && ! application.get().deploymentJobs().deployedInternally())
application = storeWithUpdatedConfig(application, applicationPackage);
application = withRotation(application, zone);
// Collect the rotation name and its DNS aliases for the config server prepare call.
Set<String> rotationNames = new HashSet<>();
Set<String> cnames = new HashSet<>();
Application app = application.get();
app.globalDnsName(controller.system()).ifPresent(applicationRotation -> {
rotationNames.add(app.rotation().orElseThrow(() -> new RuntimeException("Global Dns assigned, but no rotation id present")).asString());
cnames.add(applicationRotation.dnsName());
cnames.add(applicationRotation.secureDnsName());
cnames.add(applicationRotation.oathDnsName());
});
options = withVersion(platformVersion, options);
ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, cnames);
// Record the new deployment before returning.
application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
store(application);
return result;
}
}
/** Fetches the requested application package from the artifact store(s). */
/**
 * Stores the deployment spec and validation overrides from the application package,
 * and removes deployments and deployment jobs which the spec no longer refers to.
 *
 * @param application the application to update; the caller must hold its lock
 * @param applicationPackage the package whose configuration is stored
 * @return the updated, already-stored application
 */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
    validate(applicationPackage.deploymentSpec());
    application = application.with(applicationPackage.deploymentSpec());
    application = application.with(applicationPackage.validationOverrides());
    // Cleanup must run after the new spec is applied, since both consult the spec.
    application = withoutDeletedDeployments(application);
    application = withoutUnreferencedDeploymentJobs(application);
    store(application);
    return application; // was 'return(application);' — redundant parentheses, non-idiomatic
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
// Package-based system applications are deployed like ordinary applications, but with
// no rotations or cnames, and with the package fetched from the artifact repository.
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
DeployOptions options = withVersion(version, DeployOptions.none());
deploy(application.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
} else {
// No package: upgrade the application's node types directly through the node repository.
application.nodeTypes().forEach(nodeType -> configServer().nodeRepository().upgrade(zone, nodeType, version));
}
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
// Testers never have rotations or cnames.
return deploy(tester.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
}
// Common low-level deploy: sends the zipped package to the config server and wraps the
// prepare response in an ActivateResult keyed by the package hash.
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, DeployOptions deployOptions,
Set<String> rotationNames, Set<String> cnames) {
DeploymentId deploymentId = new DeploymentId(application, zone);
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(deploymentId, deployOptions, cnames, rotationNames,
applicationPackage.zippedContent());
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
    // Rotations apply only to production deployments of applications declaring a global service id.
    if (zone.environment() == Environment.prod && application.get().deploymentSpec().globalServiceId().isPresent()) {
        try (RotationLock rotationLock = rotationRepository.lock()) {
            Rotation rotation = rotationRepository.getOrAssignRotation(application.get(), rotationLock);
            application = application.with(rotation.id());
            // Store before DNS registration so a DNS failure does not lose the assignment.
            store(application);
            // Register all DNS aliases of the rotation. Resolve the global DNS name once,
            // instead of re-resolving (and unchecked-get()ing) it for each alias.
            application.get().globalDnsName(controller.system()).ifPresent(globalDnsName -> {
                registerRotationInDns(rotation, globalDnsName.dnsName());
                registerRotationInDns(rotation, globalDnsName.secureDnsName());
                registerRotationInDns(rotation, globalDnsName.oathDnsName());
            });
        }
    }
    return application;
}
// Builds a no-op activation result whose log explains that this deployment was ignored.
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = Collections.singletonList(logEntry);
prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
// Removes production deployments that the deployment spec no longer includes.
// Removal requires an explicit 'deployment-removal' validation override; otherwise this throws.
private LockedApplication withoutDeletedDeployments(LockedApplication application) {
List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
.filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
// Guard: deleting live production deployments is destructive, so demand an explicit override.
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
// Deactivate each removed zone, threading the updated application through each step.
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
// Removes production deployment jobs for zones that the deployment spec no longer includes.
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
ZoneId zone = job.zone(controller.system());
if (application.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region())))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
// Returns a copy of the given options with the platform version pinned to the given version.
private DeployOptions withVersion(Version version, DeployOptions options) {
return new DeployOptions(options.deployDirectly,
Optional.of(version),
options.ignoreValidationErrors,
options.deployCurrentVersion);
}
/** Register a DNS name for rotation */
// Best effort: creates (or corrects) a CNAME from dnsName to the rotation's name.
// Any failure is logged and swallowed, so DNS trouble does not fail the deployment.
private void registerRotationInDns(Rotation rotation, String dnsName) {
try {
RecordData rotationName = RecordData.fqdn(rotation.name());
List<Record> records = nameService.findRecords(Record.Type.CNAME, RecordName.from(dnsName));
// Repair any existing CNAME records that point to the wrong target.
records.forEach(record -> {
if ( ! record.data().equals(rotationName)) {
nameService.updateRecord(record.id(), rotationName);
log.info("Updated mapping for record ID " + record.id().asString() + ": '" + dnsName
+ "' -> '" + rotation.name() + "'");
}
});
// No record at all: create the CNAME from scratch.
if (records.isEmpty()) {
RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
+ rotation.name() + "'");
}
} catch (RuntimeException e) {
// Deliberately non-fatal: deployment proceeds even if DNS registration fails.
log.log(Level.WARNING, "Failed to register CNAME", e);
}
}
/** Returns the endpoints of the deployment, or an empty list if the request fails */
// Throws NotExistsException if the deployment is unknown (tester instances are exempt from
// the existence check, as they are not recorded as deployments on the application).
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
if ( ! get(deploymentId.applicationId())
.map(application -> application.deployments().containsKey(deploymentId.zoneId()))
.orElse(deploymentId.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", deploymentId.toString());
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::getEndpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
// Routing layer failures are downgraded to 'unknown' rather than propagated.
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
/**
 * Deletes the given application. All known instances of the application will be deleted,
* including PR instances.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if no instances of the application exist
*/
public void deleteApplication(ApplicationId applicationId, Optional<OktaAccessToken> token) {
// Collect every instance (including PR instances) sharing this tenant and application name
List<ApplicationId> instances = asList(applicationId.tenant()).stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationId.application()))
.collect(Collectors.toList());
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
instances.forEach(id -> lockOrThrow(id, application -> {
if ( ! application.get().deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
Tenant tenant = controller.tenants().tenant(id.tenant()).get();
if (tenant instanceof AthenzTenant && ! token.isPresent())
throw new IllegalArgumentException("Could not delete '" + application + "': No Okta Access Token provided");
// NOTE(review): presumably only the default instance is registered in ZMS — hence the isDefault() guard
if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
zmsClient.deleteApplication(((AthenzTenant) tenant).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()), token.get());
}
// Remove persisted state and stored packages (both the application's and its tester's)
curator.removeApplication(id);
applicationStore.removeAll(id);
applicationStore.removeAll(TesterId.of(id));
log.info("Deleted " + application);
}));
}
/**
* Replace any previous version of this application by this instance
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param hostname If non-empty, restart will only be scheduled for this host
*/
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
configServer.restart(deploymentId, hostname);
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
try {
return configServer.isSuspended(deploymentId);
}
catch (ConfigServerException e) {
// A deployment unknown to the config server is treated as not suspended; other errors propagate
if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
return false;
throw e;
}
}
/** Deactivate application in the given zone */
public void deactivate(ApplicationId application, ZoneId zone) {
lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.get().id(), zone));
}
catch (NoInstanceException ignored) {
// Already inactive on the config server; swallowing this makes deactivation idempotent
}
return application.withoutDeploymentIn(zone);
}
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns an id equal to the given one, but with dashes in the application name replaced by
 * underscores. Used to detect conflicting application names (see createApplication).
 */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replace('-', '_'), // literal replace; no regex needed
                              id.instance().value());
}
public ConfigServer configServer() { return configServer; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application need to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(ApplicationId application) {
return curator.lock(application);
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
// Presumably run for its validation side effect on the deployment steps; the job list is discarded
new DeploymentSteps(deploymentSpec, controller::system).jobs();
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
// Only guards production zones which already have a deployment; other environments may deploy any version
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Returns a copy of the given list of applications, sorted by application ID. */
private static List<Application> sort(List<Application> applications) {
    List<Application> sorted = new ArrayList<>(applications);
    sorted.sort(Comparator.comparing(Application::id));
    return sorted;
}
/**
 * Verifies that the Athenz domain declared in deployment.xml, if any, matches the Athenz
 * domain of the owning tenant.
 *
 * @throws IllegalArgumentException if the tenant does not exist, is not an Athenz tenant,
 *                                  or its domain does not match the one in deployment.xml
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage) {
    applicationPackage.deploymentSpec().athenzDomain().ifPresent(identityDomain -> {
        Tenant tenant = controller.tenants().tenant(tenantName)
                                  .orElseThrow(() -> new IllegalArgumentException("Tenant does not exist"));
        AthenzDomain tenantDomain = Optional.of(tenant)
                .filter(t -> t instanceof AthenzTenant)
                .map(t -> (AthenzTenant) t)
                .orElseThrow(() -> new IllegalArgumentException(
                        String.format("Athenz domain defined in deployment.xml, but no Athenz domain for tenant (%s). " +
                                      "It is currently not possible to launch Athenz services from personal tenants, use " +
                                      "Athenz tenant instead.",
                                      tenantName.value())))
                .domain();
        if ( ! Objects.equals(tenantDomain.getName(), identityDomain.value()))
            throw new IllegalArgumentException(String.format("Athenz domain in deployment.xml: [%s] must match tenant domain: [%s]",
                                                             identityDomain.value(),
                                                             tenantDomain.getName()));
    });
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final RotationRepository rotationRepository;
private final ZmsClientFacade zmsClient;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, CuratorDb curator,
AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
NameService nameService, ConfigServer configServer,
ArtifactRepository artifactRepository, ApplicationStore applicationStore,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.curator = curator;
this.zmsClient = new ZmsClientFacade(zmsClientFactory.createZmsClient(), zmsClientFactory.getControllerIdentity());
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.artifactRepository = artifactRepository;
this.applicationStore = applicationStore;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, buildService, clock);
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (Application application : curator.readApplications()) {
lockIfPresent(application.id(), this::store);
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or null if it is not present */
public Optional<Application> get(ApplicationId id) {
return curator.readApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return sort(curator.readApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return sort(curator.readApplications(tenant));
}
public ArtifactRepository artifacts() { return artifactRepository; }
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(ApplicationId id) {
// Falls back to the controller's system version when the application has no such nodes
return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
.flatMap(zone -> configServer().nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder()))
.orElse(controller.systemVersion());
}
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
List<String> rotations = new ArrayList<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
rotations.add(endpoint.get());
}
return rotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
Map<String, EndpointStatus> result = new HashMap<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
EndpointStatus status = configServer.getGlobalRotationStatus(deploymentId, endpoint.get());
result.put(endpoint.get(), status);
}
return result;
}
/**
 * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
 * This method finds that one service endpoint and strips the URI part that
 * the routingGenerator is wrapping around the endpoint.
 *
 * @param deploymentId The deployment to retrieve global service endpoint for
 * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
 * @throws IOException if an endpoint has no host part, or its URI is malformed
 */
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
    Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
    Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
        try {
            URI uri = new URI(endpoint.getEndpoint());
            String serviceEndpoint = uri.getHost();
            if (serviceEndpoint == null) {
                throw new IOException("Unexpected endpoints returned from the Routing Generator");
            }
            // Strip the wrapper domains the routing generator adds. Use the literal String.replace
            // rather than replaceAll: these suffixes contain '.', which replaceAll would treat as a
            // regex wildcard matching any character.
            String canonicalEndpoint = serviceEndpoint
                    .replace(".vespa.yahooapis.com", "")
                    .replace(".vespa.oath.cloud", "");
            String hostname = endpoint.getHostname();
            if (hostname != null) {
                if (endpoint.isGlobal()) {
                    hostToGlobalEndpoint.put(hostname, endpoint);
                } else {
                    hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                }
                // A host seen both as a global and a non-global endpoint identifies the canonical endpoint
                if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                    return Optional.of(hostToCanonicalEndpoint.get(hostname));
                }
            }
        } catch (URISyntaxException use) {
            throw new IOException(use);
        }
    }
    return Optional.empty();
}
/**
 * Creates a new application for an existing tenant.
 *
 * @throws IllegalArgumentException if the application already exists, the tenant does not exist,
 *                                  the instance name is unsupported, or required credentials are missing
 */
public Application createApplication(ApplicationId id, Optional<OktaAccessToken> token) {
if ( ! (id.instance().isDefault()))
throw new IllegalArgumentException("Only the instance name 'default' is supported at the moment");
if (id.instance().isTester())
throw new IllegalArgumentException("'" + id + "' is a tester application!");
try (Lock lock = lock(id)) {
// Validate the application name only when no instance with this name exists yet for the tenant
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
if ( ! tenant.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Names differing only in dash vs. underscore are treated as conflicting
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
// Register the default instance of Athenz tenants in ZMS; this requires an Okta token
if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
if ( ! token.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': No Okta Access Token provided");
zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()), token.get());
}
LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
store(application);
log.info("Created " + application);
return application.get();
}
}
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options);
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
Optional<ApplicationVersion> applicationVersionFromDeployer,
DeployOptions options) {
if (applicationId.instance().isTester())
throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
try (Lock lock = lock(applicationId)) {
LockedApplication application = get(applicationId)
.map(app -> new LockedApplication(app, lock))
.orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
boolean canDeployDirectly = options.deployDirectly || zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
if (canDeployDirectly) {
platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
}
else {
JobType jobType = JobType.from(controller.system(), zone);
Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
if ( ! job.isPresent()
|| ! job.get().lastTriggered().isPresent()
|| job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
return unexpectedDeployment(applicationId, zone);
JobRun triggered = job.get().lastTriggered().get();
platformVersion = preferOldestVersion
? triggered.sourcePlatform().orElse(triggered.platform())
: triggered.platform();
applicationVersion = preferOldestVersion
? triggered.sourceApplication().orElse(triggered.application())
: triggered.application();
applicationPackage = getApplicationPackage(application.get(), applicationVersion);
validateRun(application.get(), zone, platformVersion, applicationVersion);
}
verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage);
if ( ! preferOldestVersion && ! application.get().deploymentJobs().deployedInternally())
application = storeWithUpdatedConfig(application, applicationPackage);
application = withRotation(application, zone);
Set<String> rotationNames = new HashSet<>();
Set<String> cnames = new HashSet<>();
Application app = application.get();
app.globalDnsName(controller.system()).ifPresent(applicationRotation -> {
rotationNames.add(app.rotation().orElseThrow(() -> new RuntimeException("Global Dns assigned, but no rotation id present")).asString());
cnames.add(applicationRotation.dnsName());
cnames.add(applicationRotation.secureDnsName());
cnames.add(applicationRotation.oathDnsName());
});
options = withVersion(platformVersion, options);
ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, cnames);
application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
store(application);
return result;
}
}
/** Fetches the requested application package from the artifact store(s). */
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
/**
 * Stores the deployment spec and validation overrides from the given application package,
 * removes deployments and job status entries no longer referenced by the spec, and persists the result.
 *
 * @param application        the locked application to update
 * @param applicationPackage the package whose configuration should be applied
 * @return the updated application, already stored
 * @throws IllegalArgumentException if the deployment spec is invalid for this system
 */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
    validate(applicationPackage.deploymentSpec());
    application = application.with(applicationPackage.deploymentSpec());
    application = application.with(applicationPackage.validationOverrides());
    application = withoutDeletedDeployments(application);
    application = withoutUnreferencedDeploymentJobs(application);
    store(application);
    return application; // plain return; 'return(application)' read like a method call
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
DeployOptions options = withVersion(version, DeployOptions.none());
deploy(application.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
} else {
application.nodeTypes().forEach(nodeType -> configServer().nodeRepository().upgrade(zone, nodeType, version));
}
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
return deploy(tester.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
}
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, DeployOptions deployOptions,
Set<String> rotationNames, Set<String> cnames) {
DeploymentId deploymentId = new DeploymentId(application, zone);
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(deploymentId, deployOptions, cnames, rotationNames,
applicationPackage.zippedContent());
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod && application.get().deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getOrAssignRotation(application.get(), rotationLock);
application = application.with(rotation.id());
store(application);
registerRotationInDns(rotation, application.get().globalDnsName(controller.system()).get().dnsName());
registerRotationInDns(rotation, application.get().globalDnsName(controller.system()).get().secureDnsName());
registerRotationInDns(rotation, application.get().globalDnsName(controller.system()).get().oathDnsName());
}
}
return application;
}
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = Collections.singletonList(logEntry);
prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
/** Removes production deployments no longer listed in deployment.xml, requiring an explicit override to do so. */
private LockedApplication withoutDeletedDeployments(LockedApplication application) {
// Production deployments which are active but no longer present in the deployment spec
List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
.filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
// Removing a production deployment is destructive, so it must be explicitly allowed by a validation override
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
ZoneId zone = job.zone(controller.system());
if (application.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region())))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
private DeployOptions withVersion(Version version, DeployOptions options) {
return new DeployOptions(options.deployDirectly,
Optional.of(version),
options.ignoreValidationErrors,
options.deployCurrentVersion);
}
/** Register a DNS name for rotation */
private void registerRotationInDns(Rotation rotation, String dnsName) {
try {
RecordData rotationName = RecordData.fqdn(rotation.name());
List<Record> records = nameService.findRecords(Record.Type.CNAME, RecordName.from(dnsName));
records.forEach(record -> {
if ( ! record.data().equals(rotationName)) {
nameService.updateRecord(record.id(), rotationName);
log.info("Updated mapping for record ID " + record.id().asString() + ": '" + dnsName
+ "' -> '" + rotation.name() + "'");
}
});
if (records.isEmpty()) {
RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
+ rotation.name() + "'");
}
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to register CNAME", e);
}
}
/** Returns the endpoints of the deployment, or an empty list if the request fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
if ( ! get(deploymentId.applicationId())
.map(application -> application.deployments().containsKey(deploymentId.zoneId()))
.orElse(deploymentId.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", deploymentId.toString());
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::getEndpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
/**
 * Deletes the given application. All known instances of the application will be deleted,
* including PR instances.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if no instances of the application exist
*/
public void deleteApplication(ApplicationId applicationId, Optional<OktaAccessToken> token) {
List<ApplicationId> instances = asList(applicationId.tenant()).stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationId.application()))
.collect(Collectors.toList());
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
instances.forEach(id -> lockOrThrow(id, application -> {
if ( ! application.get().deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
Tenant tenant = controller.tenants().tenant(id.tenant()).get();
if (tenant instanceof AthenzTenant && ! token.isPresent())
throw new IllegalArgumentException("Could not delete '" + application + "': No Okta Access Token provided");
if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
zmsClient.deleteApplication(((AthenzTenant) tenant).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()), token.get());
}
curator.removeApplication(id);
applicationStore.removeAll(id);
applicationStore.removeAll(TesterId.of(id));
log.info("Deleted " + application);
}));
}
/**
* Replace any previous version of this application by this instance
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param hostname If non-empty, restart will only be scheduled for this host
*/
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
configServer.restart(deploymentId, hostname);
}
/**
* Asks the config server whether this deployment is currently <i>suspended</i>:
* Not in a state where it should receive traffic.
*/
public boolean isSuspended(DeploymentId deploymentId) {
try {
return configServer.isSuspended(deploymentId);
}
catch (ConfigServerException e) {
if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
return false;
throw e;
}
}
/** Deactivate application in the given zone */
public void deactivate(ApplicationId application, ZoneId zone) {
lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
* Deactivates a locked application without storing it
*
* @return the application with the deployment in the given zone removed
*/
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.get().id(), zone));
}
catch (NoInstanceException ignored) {
}
return application.withoutDeploymentIn(zone);
}
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns an id equal to the given one, but with dashes in the application name replaced by
 * underscores. Used to detect conflicting application names (see createApplication).
 */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replace('-', '_'), // literal replace; no regex needed
                              id.instance().value());
}
public ConfigServer configServer() { return configServer; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application need to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(ApplicationId application) {
return curator.lock(application);
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
new DeploymentSteps(deploymentSpec, controller::system).jobs();
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns this' rotation repository. */
public RotationRepository rotationRepository() {
    return rotationRepository;
}
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
    Comparator<Application> byId = Comparator.comparing(Application::id);
    return applications.stream().sorted(byId).collect(Collectors.toList());
}
/**
 * Verifies that the Athenz domain declared in deployment.xml, if any, matches the Athenz domain
 * of the owning tenant. If no Athenz domain is declared this does nothing.
 *
 * @throws IllegalArgumentException if the tenant does not exist, is not an Athenz tenant,
 *                                  or its domain differs from the one declared in deployment.xml
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage) {
    applicationPackage.deploymentSpec().athenzDomain()
            .ifPresent(identityDomain -> {
                Optional<Tenant> tenant = controller.tenants().tenant(tenantName);
                if(!tenant.isPresent()) {
                    throw new IllegalArgumentException("Tenant does not exist");
                } else {
                    // Only Athenz tenants have a domain to validate against; personal tenants are rejected
                    AthenzDomain tenantDomain = tenant.filter(t -> t instanceof AthenzTenant)
                                                      .map(t -> (AthenzTenant) t)
                                                      .orElseThrow(() -> new IllegalArgumentException(
                                                              String.format("Athenz domain defined in deployment.xml, but no Athenz domain for tenant (%s). " +
                                                                            "It is currently not possible to launch Athenz services from personal tenants, use " +
                                                                            "Athenz tenant instead.",
                                                                            tenantName.value())))
                                                      .domain();
                    if (!Objects.equals(tenantDomain.getName(), identityDomain.value()))
                        throw new IllegalArgumentException(String.format("Athenz domain in deployment.xml: [%s] must match tenant domain: [%s]",
                                                                         identityDomain.value(),
                                                                         tenantDomain.getName()));
                }
            });
}
} |
This is how I would do it: The FileFlagSource should get injected. Define the FeatureFlag only once in the constructor, taking in the source. You would have a `private final FeatureFlag useConfigServerCache` that you'd call value() on here. | private boolean useCache(GetConfigRequest request) {
if (request.noCache())
return false;
else
return new FeatureFlag("use-config-server-cache", true, new FileFlagSource()).value();
} | return new FeatureFlag("use-config-server-cache", true, new FileFlagSource()).value(); | private boolean useCache(GetConfigRequest request) {
if (request.noCache())
return false;
else
return useConfigServerCache.value();
} | class Application implements ModelResult {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(Application.class.getName());
private final long appGeneration;
private final boolean internalRedeploy;
private final Version vespaVersion;
private final Model model;
private final ServerCache cache;
private final MetricUpdater metricUpdater;
private final ApplicationId app;
/**
 * Creates an Application carrying a built config model and its serving metadata.
 *
 * @param model            the config model to serve config from, may not be null
 * @param cache            server cache for resolved config responses and config definitions
 * @param appGeneration    the config generation this application serves
 * @param internalRedeploy presumably whether this model stems from an internal redeployment
 *                         rather than a new application package — confirm with callers
 * @param vespaVersion     the Vespa version this model is built for
 * @param metricUpdater    sink for request and cache metrics
 * @param app              the id of this application
 */
public Application(Model model, ServerCache cache, long appGeneration, boolean internalRedeploy,
                   Version vespaVersion, MetricUpdater metricUpdater, ApplicationId app) {
    Objects.requireNonNull(model, "The model cannot be null");
    this.model = model;
    this.cache = cache;
    this.appGeneration = appGeneration;
    this.internalRedeploy = internalRedeploy;
    this.vespaVersion = vespaVersion;
    this.metricUpdater = metricUpdater;
    this.app = app;
}
/**
* Returns the generation for the config we are currently serving
*
* @return the config generation
*/
public Long getApplicationGeneration() { return appGeneration; }
/** Returns the application model, never null */
@Override
public Model getModel() { return model; }
@Override
public String toString() {
    // E.g. "application 'foo', generation 4, vespa version 7.1.2"
    return "application '" + app.application().value() + "', " +
           "generation " + appGeneration + ", " +
           "vespa version " + vespaVersion;
}
public ApplicationInfo toApplicationInfo() {
return new ApplicationInfo(app, appGeneration, model);
}
public ServerCache getCache() {
return cache;
}
public ApplicationId getId() {
return app;
}
public Version getVespaVersion() {
return vespaVersion;
}
/**
* Gets a config from ZK. Returns null if not found.
*/
/**
 * Resolves a config from this application's model, consulting the server cache when the
 * request permits it. A cached response is returned when one exists for the
 * (config key, def md5) pair; otherwise the payload is resolved from the model and the
 * result is cached. Note: never returns null — failures are signalled by exceptions.
 *
 * @param req             the incoming config request
 * @param responseFactory factory creating the wire response from the resolved payload
 * @return the resolved config response
 * @throws UnknownConfigDefinitionException if no config definition is found for the request
 * @throws ConfigurationRuntimeException    if the model cannot resolve the requested config
 */
public ConfigResponse resolveConfig(GetConfigRequest req, ConfigResponseFactory responseFactory) {
    long start = System.currentTimeMillis();
    metricUpdater.incrementRequests();
    ConfigKey<?> configKey = req.getConfigKey();
    String defMd5 = configKey.getMd5();
    // Requests may omit the def md5; derive it from the def content in that case
    if (defMd5 == null || defMd5.isEmpty()) {
        defMd5 = ConfigUtils.getDefMd5(req.getDefContent().asList());
    }
    ConfigCacheKey cacheKey = new ConfigCacheKey(configKey, defMd5);
    if (logDebug()) {
        debug("Resolving config " + cacheKey);
    }
    if (useCache(req)) {
        ConfigResponse config = cache.get(cacheKey);
        if (config != null) {
            if (logDebug()) {
                debug("Found config " + cacheKey + " in cache");
            }
            metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
            return config;
        }
    }
    // Cache miss (or caching disabled): resolve from the model
    ConfigDefinition def = getTargetDef(req);
    if (def == null) {
        metricUpdater.incrementFailedRequests();
        throw new UnknownConfigDefinitionException("Unable to find config definition for '" + configKey.getNamespace() + "." + configKey.getName());
    }
    if (logDebug()) {
        debug("Resolving " + configKey + " with config definition " + def);
    }
    ConfigPayload payload = model.getConfig(configKey, def);
    if (payload == null) {
        metricUpdater.incrementFailedRequests();
        throw new ConfigurationRuntimeException("Unable to resolve config " + configKey);
    }
    ConfigResponse configResponse = responseFactory.createResponse(payload, def.getCNode(), appGeneration, internalRedeploy);
    metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
    if (useCache(req)) {
        cache.put(cacheKey, configResponse, configResponse.getConfigMd5());
        metricUpdater.setCacheConfigElems(cache.configElems());
        metricUpdater.setCacheChecksumElems(cache.checkSumElems());
    }
    return configResponse;
}
private boolean logDebug() {
return log.isLoggable(LogLevel.DEBUG);
}
private void debug(String message) {
log.log(LogLevel.DEBUG, TenantRepository.logPre(getId())+message);
}
/** Returns the config definition to resolve against: the one carried by the request if any, otherwise the cached one. */
private ConfigDefinition getTargetDef(GetConfigRequest req) {
    ConfigKey<?> configKey = req.getConfigKey();
    DefContent defContent = req.getDefContent();
    if ( ! defContent.isEmpty()) {
        if (logDebug()) {
            debug("Got config schema from request, length:" + defContent.asList().size() + " : " + configKey);
        }
        return new ConfigDefinition(configKey.getName(), defContent.asStringArray());
    }
    if (logDebug()) {
        debug("No config schema in request for " + configKey);
    }
    return cache.getDef(new ConfigDefinitionKey(configKey.getName(), configKey.getNamespace()));
}
/** Reports the number of hosts of this application to the metrics sink. */
void updateHostMetrics(int numHosts) {
    metricUpdater.setHosts(numHosts);
}
/** Resolves a config using an uncompressed response, see {@link #resolveConfig(GetConfigRequest, ConfigResponseFactory)}. */
ConfigResponse resolveConfig(GetConfigRequest req) {
    return resolveConfig(req, new UncompressedConfigResponseFactory());
}
public Set<ConfigKey<?>> allConfigsProduced() {
return model.allConfigsProduced();
}
public Set<String> allConfigIds() {
return model.allConfigIds();
}
} | class Application implements ModelResult {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(Application.class.getName());
private final long appGeneration;
private final boolean internalRedeploy;
private final Version vespaVersion;
private final Model model;
private final ServerCache cache;
private final MetricUpdater metricUpdater;
private final ApplicationId app;
private final FeatureFlag useConfigServerCache;
public Application(Model model, ServerCache cache, long appGeneration, boolean internalRedeploy,
Version vespaVersion, MetricUpdater metricUpdater, ApplicationId app) {
this(model, cache, appGeneration, internalRedeploy, vespaVersion, metricUpdater, app, new FileFlagSource());
}
public Application(Model model, ServerCache cache, long appGeneration, boolean internalRedeploy,
Version vespaVersion, MetricUpdater metricUpdater, ApplicationId app, FlagSource flagSource) {
Objects.requireNonNull(model, "The model cannot be null");
this.model = model;
this.cache = cache;
this.appGeneration = appGeneration;
this.internalRedeploy = internalRedeploy;
this.vespaVersion = vespaVersion;
this.metricUpdater = metricUpdater;
this.app = app;
this.useConfigServerCache = new FeatureFlag("use-config-server-cache", true, flagSource);
}
/**
* Returns the generation for the config we are currently serving
*
* @return the config generation
*/
public Long getApplicationGeneration() { return appGeneration; }
/** Returns the application model, never null */
@Override
public Model getModel() { return model; }
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("application '").append(app.application().value()).append("', ");
sb.append("generation ").append(appGeneration).append(", ");
sb.append("vespa version ").append(vespaVersion);
return sb.toString();
}
public ApplicationInfo toApplicationInfo() {
return new ApplicationInfo(app, appGeneration, model);
}
public ServerCache getCache() {
return cache;
}
public ApplicationId getId() {
return app;
}
public Version getVespaVersion() {
return vespaVersion;
}
/**
* Gets a config from ZK. Returns null if not found.
*/
public ConfigResponse resolveConfig(GetConfigRequest req, ConfigResponseFactory responseFactory) {
long start = System.currentTimeMillis();
metricUpdater.incrementRequests();
ConfigKey<?> configKey = req.getConfigKey();
String defMd5 = configKey.getMd5();
if (defMd5 == null || defMd5.isEmpty()) {
defMd5 = ConfigUtils.getDefMd5(req.getDefContent().asList());
}
ConfigCacheKey cacheKey = new ConfigCacheKey(configKey, defMd5);
if (logDebug()) {
debug("Resolving config " + cacheKey);
}
if (useCache(req)) {
ConfigResponse config = cache.get(cacheKey);
if (config != null) {
if (logDebug()) {
debug("Found config " + cacheKey + " in cache");
}
metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
return config;
}
}
ConfigDefinition def = getTargetDef(req);
if (def == null) {
metricUpdater.incrementFailedRequests();
throw new UnknownConfigDefinitionException("Unable to find config definition for '" + configKey.getNamespace() + "." + configKey.getName());
}
if (logDebug()) {
debug("Resolving " + configKey + " with config definition " + def);
}
ConfigPayload payload = model.getConfig(configKey, def);
if (payload == null) {
metricUpdater.incrementFailedRequests();
throw new ConfigurationRuntimeException("Unable to resolve config " + configKey);
}
ConfigResponse configResponse = responseFactory.createResponse(payload, def.getCNode(), appGeneration, internalRedeploy);
metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
if (useCache(req)) {
cache.put(cacheKey, configResponse, configResponse.getConfigMd5());
metricUpdater.setCacheConfigElems(cache.configElems());
metricUpdater.setCacheChecksumElems(cache.checkSumElems());
}
return configResponse;
}
private boolean logDebug() {
return log.isLoggable(LogLevel.DEBUG);
}
private void debug(String message) {
log.log(LogLevel.DEBUG, TenantRepository.logPre(getId())+message);
}
private ConfigDefinition getTargetDef(GetConfigRequest req) {
ConfigKey<?> configKey = req.getConfigKey();
DefContent def = req.getDefContent();
ConfigDefinitionKey configDefinitionKey = new ConfigDefinitionKey(configKey.getName(), configKey.getNamespace());
if (def.isEmpty()) {
if (logDebug()) {
debug("No config schema in request for " + configKey);
}
return cache.getDef(configDefinitionKey);
} else {
if (logDebug()) {
debug("Got config schema from request, length:" + def.asList().size() + " : " + configKey);
}
return new ConfigDefinition(configKey.getName(), def.asStringArray());
}
}
void updateHostMetrics(int numHosts) {
metricUpdater.setHosts(numHosts);
}
ConfigResponse resolveConfig(GetConfigRequest req) {
return resolveConfig(req, new UncompressedConfigResponseFactory());
}
public Set<ConfigKey<?>> allConfigsProduced() {
return model.allConfigsProduced();
}
public Set<String> allConfigIds() {
return model.allConfigIds();
}
} |
Thanks, that's better, I have implemented that in the second commit | private boolean useCache(GetConfigRequest request) {
if (request.noCache())
return false;
else
return new FeatureFlag("use-config-server-cache", true, new FileFlagSource()).value();
} | return new FeatureFlag("use-config-server-cache", true, new FileFlagSource()).value(); | private boolean useCache(GetConfigRequest request) {
if (request.noCache())
return false;
else
return useConfigServerCache.value();
} | class Application implements ModelResult {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(Application.class.getName());
private final long appGeneration;
private final boolean internalRedeploy;
private final Version vespaVersion;
private final Model model;
private final ServerCache cache;
private final MetricUpdater metricUpdater;
private final ApplicationId app;
public Application(Model model, ServerCache cache, long appGeneration, boolean internalRedeploy,
Version vespaVersion, MetricUpdater metricUpdater, ApplicationId app) {
Objects.requireNonNull(model, "The model cannot be null");
this.model = model;
this.cache = cache;
this.appGeneration = appGeneration;
this.internalRedeploy = internalRedeploy;
this.vespaVersion = vespaVersion;
this.metricUpdater = metricUpdater;
this.app = app;
}
/**
* Returns the generation for the config we are currently serving
*
* @return the config generation
*/
public Long getApplicationGeneration() { return appGeneration; }
/** Returns the application model, never null */
@Override
public Model getModel() { return model; }
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("application '").append(app.application().value()).append("', ");
sb.append("generation ").append(appGeneration).append(", ");
sb.append("vespa version ").append(vespaVersion);
return sb.toString();
}
public ApplicationInfo toApplicationInfo() {
return new ApplicationInfo(app, appGeneration, model);
}
public ServerCache getCache() {
return cache;
}
public ApplicationId getId() {
return app;
}
public Version getVespaVersion() {
return vespaVersion;
}
/**
* Gets a config from ZK. Returns null if not found.
*/
public ConfigResponse resolveConfig(GetConfigRequest req, ConfigResponseFactory responseFactory) {
long start = System.currentTimeMillis();
metricUpdater.incrementRequests();
ConfigKey<?> configKey = req.getConfigKey();
String defMd5 = configKey.getMd5();
if (defMd5 == null || defMd5.isEmpty()) {
defMd5 = ConfigUtils.getDefMd5(req.getDefContent().asList());
}
ConfigCacheKey cacheKey = new ConfigCacheKey(configKey, defMd5);
if (logDebug()) {
debug("Resolving config " + cacheKey);
}
if (useCache(req)) {
ConfigResponse config = cache.get(cacheKey);
if (config != null) {
if (logDebug()) {
debug("Found config " + cacheKey + " in cache");
}
metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
return config;
}
}
ConfigDefinition def = getTargetDef(req);
if (def == null) {
metricUpdater.incrementFailedRequests();
throw new UnknownConfigDefinitionException("Unable to find config definition for '" + configKey.getNamespace() + "." + configKey.getName());
}
if (logDebug()) {
debug("Resolving " + configKey + " with config definition " + def);
}
ConfigPayload payload = model.getConfig(configKey, def);
if (payload == null) {
metricUpdater.incrementFailedRequests();
throw new ConfigurationRuntimeException("Unable to resolve config " + configKey);
}
ConfigResponse configResponse = responseFactory.createResponse(payload, def.getCNode(), appGeneration, internalRedeploy);
metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
if (useCache(req)) {
cache.put(cacheKey, configResponse, configResponse.getConfigMd5());
metricUpdater.setCacheConfigElems(cache.configElems());
metricUpdater.setCacheChecksumElems(cache.checkSumElems());
}
return configResponse;
}
private boolean logDebug() {
return log.isLoggable(LogLevel.DEBUG);
}
private void debug(String message) {
log.log(LogLevel.DEBUG, TenantRepository.logPre(getId())+message);
}
private ConfigDefinition getTargetDef(GetConfigRequest req) {
ConfigKey<?> configKey = req.getConfigKey();
DefContent def = req.getDefContent();
ConfigDefinitionKey configDefinitionKey = new ConfigDefinitionKey(configKey.getName(), configKey.getNamespace());
if (def.isEmpty()) {
if (logDebug()) {
debug("No config schema in request for " + configKey);
}
return cache.getDef(configDefinitionKey);
} else {
if (logDebug()) {
debug("Got config schema from request, length:" + def.asList().size() + " : " + configKey);
}
return new ConfigDefinition(configKey.getName(), def.asStringArray());
}
}
void updateHostMetrics(int numHosts) {
metricUpdater.setHosts(numHosts);
}
ConfigResponse resolveConfig(GetConfigRequest req) {
return resolveConfig(req, new UncompressedConfigResponseFactory());
}
public Set<ConfigKey<?>> allConfigsProduced() {
return model.allConfigsProduced();
}
public Set<String> allConfigIds() {
return model.allConfigIds();
}
} | class Application implements ModelResult {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(Application.class.getName());
private final long appGeneration;
private final boolean internalRedeploy;
private final Version vespaVersion;
private final Model model;
private final ServerCache cache;
private final MetricUpdater metricUpdater;
private final ApplicationId app;
private final FeatureFlag useConfigServerCache;
public Application(Model model, ServerCache cache, long appGeneration, boolean internalRedeploy,
Version vespaVersion, MetricUpdater metricUpdater, ApplicationId app) {
this(model, cache, appGeneration, internalRedeploy, vespaVersion, metricUpdater, app, new FileFlagSource());
}
public Application(Model model, ServerCache cache, long appGeneration, boolean internalRedeploy,
Version vespaVersion, MetricUpdater metricUpdater, ApplicationId app, FlagSource flagSource) {
Objects.requireNonNull(model, "The model cannot be null");
this.model = model;
this.cache = cache;
this.appGeneration = appGeneration;
this.internalRedeploy = internalRedeploy;
this.vespaVersion = vespaVersion;
this.metricUpdater = metricUpdater;
this.app = app;
this.useConfigServerCache = new FeatureFlag("use-config-server-cache", true, flagSource);
}
/**
* Returns the generation for the config we are currently serving
*
* @return the config generation
*/
public Long getApplicationGeneration() { return appGeneration; }
/** Returns the application model, never null */
@Override
public Model getModel() { return model; }
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("application '").append(app.application().value()).append("', ");
sb.append("generation ").append(appGeneration).append(", ");
sb.append("vespa version ").append(vespaVersion);
return sb.toString();
}
public ApplicationInfo toApplicationInfo() {
return new ApplicationInfo(app, appGeneration, model);
}
public ServerCache getCache() {
return cache;
}
public ApplicationId getId() {
return app;
}
public Version getVespaVersion() {
return vespaVersion;
}
/**
* Gets a config from ZK. Returns null if not found.
*/
public ConfigResponse resolveConfig(GetConfigRequest req, ConfigResponseFactory responseFactory) {
long start = System.currentTimeMillis();
metricUpdater.incrementRequests();
ConfigKey<?> configKey = req.getConfigKey();
String defMd5 = configKey.getMd5();
if (defMd5 == null || defMd5.isEmpty()) {
defMd5 = ConfigUtils.getDefMd5(req.getDefContent().asList());
}
ConfigCacheKey cacheKey = new ConfigCacheKey(configKey, defMd5);
if (logDebug()) {
debug("Resolving config " + cacheKey);
}
if (useCache(req)) {
ConfigResponse config = cache.get(cacheKey);
if (config != null) {
if (logDebug()) {
debug("Found config " + cacheKey + " in cache");
}
metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
return config;
}
}
ConfigDefinition def = getTargetDef(req);
if (def == null) {
metricUpdater.incrementFailedRequests();
throw new UnknownConfigDefinitionException("Unable to find config definition for '" + configKey.getNamespace() + "." + configKey.getName());
}
if (logDebug()) {
debug("Resolving " + configKey + " with config definition " + def);
}
ConfigPayload payload = model.getConfig(configKey, def);
if (payload == null) {
metricUpdater.incrementFailedRequests();
throw new ConfigurationRuntimeException("Unable to resolve config " + configKey);
}
ConfigResponse configResponse = responseFactory.createResponse(payload, def.getCNode(), appGeneration, internalRedeploy);
metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
if (useCache(req)) {
cache.put(cacheKey, configResponse, configResponse.getConfigMd5());
metricUpdater.setCacheConfigElems(cache.configElems());
metricUpdater.setCacheChecksumElems(cache.checkSumElems());
}
return configResponse;
}
private boolean logDebug() {
return log.isLoggable(LogLevel.DEBUG);
}
private void debug(String message) {
log.log(LogLevel.DEBUG, TenantRepository.logPre(getId())+message);
}
private ConfigDefinition getTargetDef(GetConfigRequest req) {
ConfigKey<?> configKey = req.getConfigKey();
DefContent def = req.getDefContent();
ConfigDefinitionKey configDefinitionKey = new ConfigDefinitionKey(configKey.getName(), configKey.getNamespace());
if (def.isEmpty()) {
if (logDebug()) {
debug("No config schema in request for " + configKey);
}
return cache.getDef(configDefinitionKey);
} else {
if (logDebug()) {
debug("Got config schema from request, length:" + def.asList().size() + " : " + configKey);
}
return new ConfigDefinition(configKey.getName(), def.asStringArray());
}
}
void updateHostMetrics(int numHosts) {
metricUpdater.setHosts(numHosts);
}
ConfigResponse resolveConfig(GetConfigRequest req) {
return resolveConfig(req, new UncompressedConfigResponseFactory());
}
public Set<ConfigKey<?>> allConfigsProduced() {
return model.allConfigsProduced();
}
public Set<String> allConfigIds() {
return model.allConfigIds();
}
} |
Consider making a package-private ConfigServerBootstrap constructor that takes in the same arguments as the current constructor, but instead of a flag source takes in the feature flag value of the bootstrapFeatureFlag. This way you can construct a ConfigServerBootstrap for the test with an explicit value for the feature flag, without having to know about flag source's getString & deserialization. | public void testBootstrapNonHostedOneConfigModel() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder);
String vespaVersion = "1.2.3";
List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion)));
List<Host> hosts = createHosts(vespaVersion);
InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true);
Curator curator = new MockCurator();
DeployTester tester = new DeployTester(modelFactories, configserverConfig,
Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()),
provisioner, curator);
tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now());
ApplicationId applicationId = tester.applicationId();
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2"));
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
FlagSource flagSource = mock(FlagSource.class);
when(flagSource.getString(new FlagId(bootstrapFeatureFlag))).thenReturn(Optional.of("true"));
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
createStateMonitor(), vipStatus,
flagSource);
waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'");
waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation");
} | when(flagSource.getString(new FlagId(bootstrapFeatureFlag))).thenReturn(Optional.of("true")); | public void testBootstrapNonHostedOneConfigModel() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder);
String vespaVersion = "1.2.3";
List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion)));
List<Host> hosts = createHosts(vespaVersion);
InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true);
Curator curator = new MockCurator();
DeployTester tester = new DeployTester(modelFactories, configserverConfig,
Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()),
provisioner, curator);
tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now());
ApplicationId applicationId = tester.applicationId();
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2"));
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
createStateMonitor(), vipStatus,
ConfigServerBootstrap.Mode.BOOTSTRAP_IN_SEPARATE_THREAD);
waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'");
waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation");
} | class ConfigServerBootstrapTest {
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
@Test
public void testBootstrap() throws Exception {
    ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder);
    InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3");
    DeployTester tester = new DeployTester(configserverConfig, provisioner);
    tester.deployApp("src/test/apps/hosted/");
    File versionFile = temporaryFolder.newFile();
    VersionState versionState = new VersionState(versionFile);
    assertTrue(versionState.isUpgraded());
    RpcServer rpcServer = createRpcServer(configserverConfig);
    VipStatus vipStatus = new VipStatus();
    // Remove a host from the first allocation — presumably to exercise bootstrap
    // redeployment with changed allocations; confirm against InMemoryProvisioner
    provisioner.allocations().values().iterator().next().remove(0);
    ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer,
                                                                versionState, createStateMonitor(), vipStatus,
                                                                ConfigServerBootstrap.Mode.INITIALIZE_ONLY,
                                                                ConfigServerBootstrap.RedeployingApplicationsFails.CONTINUE);
    assertFalse(vipStatus.isInRotation()); // not in rotation before bootstrap completes
    bootstrap.start();
    waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
    waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'");
    waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation");
    // Tearing down takes the server down and out of rotation again
    bootstrap.deconstruct();
    assertEquals(StateMonitor.Status.down, bootstrap.status());
    assertFalse(rpcServer.isRunning());
    assertFalse(vipStatus.isInRotation());
}
@Test
public void testBootstrapWhenRedeploymentFails() throws Exception {
    ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder);
    DeployTester tester = new DeployTester(configserverConfig);
    tester.deployApp("src/test/apps/hosted/");
    File versionFile = temporaryFolder.newFile();
    VersionState versionState = new VersionState(versionFile);
    assertTrue(versionState.isUpgraded());
    // Delete services.xml from the stored session so that bootstrap redeployment fails
    java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir())
                                       .resolve("tenants/")
                                       .resolve(tester.tenant().getName().value())
                                       .resolve("sessions/2/services.xml"));
    RpcServer rpcServer = createRpcServer(configserverConfig);
    VipStatus vipStatus = new VipStatus();
    ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
                                                                createStateMonitor(), vipStatus,
                                                                ConfigServerBootstrap.Mode.INITIALIZE_ONLY,
                                                                ConfigServerBootstrap.RedeployingApplicationsFails.CONTINUE);
    assertFalse(vipStatus.isInRotation());
    bootstrap.start();
    // Redeployment failed, so the server must stay initializing, not serving, and out of rotation
    assertEquals(StateMonitor.Status.initializing, bootstrap.status());
    assertFalse(rpcServer.isRunning());
    assertFalse(vipStatus.isInRotation());
    bootstrap.deconstruct();
}
// NOTE(review): removed a stray @Test annotation that preceded this private helper.
// JUnit 4 test methods must be public, void and parameterless, so the annotation could
// never have marked this method as a test — it looks like a leftover from a deleted test.
/**
 * Polls the given condition every 10 ms until it becomes true, failing after 60 seconds.
 *
 * @param booleanSupplier the condition to wait for
 * @param messageIfWaitingFails message of the exception thrown on timeout
 * @throws RuntimeException if the condition does not become true within the timeout
 * @throws InterruptedException if interrupted while sleeping between polls
 */
private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException {
    Duration timeout = Duration.ofSeconds(60);
    Instant endTime = Instant.now().plus(timeout);
    while (Instant.now().isBefore(endTime)) {
        if (booleanSupplier.getAsBoolean())
            return;
        Thread.sleep(10);
    }
    throw new RuntimeException(messageIfWaitingFails);
}
private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException {
return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder());
}
private StateMonitor createStateMonitor() {
return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")),
new SystemTimer());
}
private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException {
return createConfigserverConfig(temporaryFolder, true);
}
private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException {
return createConfigserverConfig(temporaryFolder, false);
}
/**
 * Creates a config server config rooted in fresh temporary directories, with short
 * bootstrap/retry timings to keep the tests fast.
 *
 * @param hosted whether to configure a hosted (multitenant) config server
 */
private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException {
    return new ConfigserverConfig(new ConfigserverConfig.Builder()
            .configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath())
            .configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath())
            .hostedVespa(hosted)
            .multitenant(hosted)
            .maxDurationOfBootstrap(1) /* seconds */
            .sleepTimeWhenRedeployingFails(0)); /* seconds */
}
private List<Host> createHosts(String vespaVersion) {
return Arrays.asList(createHost("host1", vespaVersion), createHost("host2", vespaVersion), createHost("host3", vespaVersion));
}
private Host createHost(String hostname, String version) {
return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version)));
}
/** Rpc server test double which only records whether it has been started, without serving anything. */
public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc {
    // volatile: written by the bootstrap thread, read by the test thread
    volatile boolean isRunning = false;
    MockRpc(int port, File tempDir) {
        super(port, tempDir);
    }
    @Override
    public void run() {
        // No real server is started; just record the state transition
        isRunning = true;
    }
    @Override
    public void stop() {
        isRunning = false;
    }
    @Override
    public boolean isRunning() {
        return isRunning;
    }
}
} | class ConfigServerBootstrapTest {
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
@Test
public void testBootstrap() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder);
InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3");
DeployTester tester = new DeployTester(configserverConfig, provisioner);
tester.deployApp("src/test/apps/hosted/");
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
provisioner.allocations().values().iterator().next().remove(0);
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer,
versionState, createStateMonitor(), vipStatus,
ConfigServerBootstrap.Mode.INITIALIZE_ONLY);
assertFalse(vipStatus.isInRotation());
bootstrap.start();
waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'");
waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation");
bootstrap.deconstruct();
assertEquals(StateMonitor.Status.down, bootstrap.status());
assertFalse(rpcServer.isRunning());
assertFalse(vipStatus.isInRotation());
}
@Test
public void testBootstrapWhenRedeploymentFails() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder);
DeployTester tester = new DeployTester(configserverConfig);
tester.deployApp("src/test/apps/hosted/");
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir())
.resolve("tenants/")
.resolve(tester.tenant().getName().value())
.resolve("sessions/2/services.xml"));
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
createStateMonitor(), vipStatus,
ConfigServerBootstrap.Mode.INITIALIZE_ONLY);
assertFalse(vipStatus.isInRotation());
bootstrap.start();
assertEquals(StateMonitor.Status.initializing, bootstrap.status());
assertFalse(rpcServer.isRunning());
assertFalse(vipStatus.isInRotation());
bootstrap.deconstruct();
}
@Test
private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException {
Duration timeout = Duration.ofSeconds(60);
Instant endTime = Instant.now().plus(timeout);
while (Instant.now().isBefore(endTime)) {
if (booleanSupplier.getAsBoolean())
return;
Thread.sleep(10);
}
throw new RuntimeException(messageIfWaitingFails);
}
private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException {
return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder());
}
private StateMonitor createStateMonitor() {
return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")),
new SystemTimer());
}
private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException {
return createConfigserverConfig(temporaryFolder, true);
}
private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException {
return createConfigserverConfig(temporaryFolder, false);
}
private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException {
return new ConfigserverConfig(new ConfigserverConfig.Builder()
.configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath())
.configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath())
.hostedVespa(hosted)
.multitenant(hosted)
.maxDurationOfBootstrap(1) /* seconds */
.sleepTimeWhenRedeployingFails(0)); /* seconds */
}
private List<Host> createHosts(String vespaVersion) {
return Arrays.asList(createHost("host1", vespaVersion), createHost("host2", vespaVersion), createHost("host3", vespaVersion));
}
private Host createHost(String hostname, String version) {
return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version)));
}
public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc {
volatile boolean isRunning = false;
MockRpc(int port, File tempDir) {
super(port, tempDir);
}
@Override
public void run() {
isRunning = true;
}
@Override
public void stop() {
isRunning = false;
}
@Override
public boolean isRunning() {
return isRunning;
}
}
} |
Yes, I see that I can simplify this a bit | public void testBootstrapNonHostedOneConfigModel() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder);
String vespaVersion = "1.2.3";
List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion)));
List<Host> hosts = createHosts(vespaVersion);
InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true);
Curator curator = new MockCurator();
DeployTester tester = new DeployTester(modelFactories, configserverConfig,
Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()),
provisioner, curator);
tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now());
ApplicationId applicationId = tester.applicationId();
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2"));
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
FlagSource flagSource = mock(FlagSource.class);
when(flagSource.getString(new FlagId(bootstrapFeatureFlag))).thenReturn(Optional.of("true"));
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
createStateMonitor(), vipStatus,
flagSource);
waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'");
waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation");
} | when(flagSource.getString(new FlagId(bootstrapFeatureFlag))).thenReturn(Optional.of("true")); | public void testBootstrapNonHostedOneConfigModel() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder);
String vespaVersion = "1.2.3";
List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion)));
List<Host> hosts = createHosts(vespaVersion);
InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true);
Curator curator = new MockCurator();
DeployTester tester = new DeployTester(modelFactories, configserverConfig,
Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()),
provisioner, curator);
tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now());
ApplicationId applicationId = tester.applicationId();
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2"));
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
createStateMonitor(), vipStatus,
ConfigServerBootstrap.Mode.BOOTSTRAP_IN_SEPARATE_THREAD);
waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'");
waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation");
} | class ConfigServerBootstrapTest {
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
@Test
public void testBootstrap() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder);
InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3");
DeployTester tester = new DeployTester(configserverConfig, provisioner);
tester.deployApp("src/test/apps/hosted/");
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
provisioner.allocations().values().iterator().next().remove(0);
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer,
versionState, createStateMonitor(), vipStatus,
ConfigServerBootstrap.Mode.INITIALIZE_ONLY,
ConfigServerBootstrap.RedeployingApplicationsFails.CONTINUE);
assertFalse(vipStatus.isInRotation());
bootstrap.start();
waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'");
waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation");
bootstrap.deconstruct();
assertEquals(StateMonitor.Status.down, bootstrap.status());
assertFalse(rpcServer.isRunning());
assertFalse(vipStatus.isInRotation());
}
@Test
public void testBootstrapWhenRedeploymentFails() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder);
DeployTester tester = new DeployTester(configserverConfig);
tester.deployApp("src/test/apps/hosted/");
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir())
.resolve("tenants/")
.resolve(tester.tenant().getName().value())
.resolve("sessions/2/services.xml"));
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
createStateMonitor(), vipStatus,
ConfigServerBootstrap.Mode.INITIALIZE_ONLY,
ConfigServerBootstrap.RedeployingApplicationsFails.CONTINUE);
assertFalse(vipStatus.isInRotation());
bootstrap.start();
assertEquals(StateMonitor.Status.initializing, bootstrap.status());
assertFalse(rpcServer.isRunning());
assertFalse(vipStatus.isInRotation());
bootstrap.deconstruct();
}
@Test
private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException {
Duration timeout = Duration.ofSeconds(60);
Instant endTime = Instant.now().plus(timeout);
while (Instant.now().isBefore(endTime)) {
if (booleanSupplier.getAsBoolean())
return;
Thread.sleep(10);
}
throw new RuntimeException(messageIfWaitingFails);
}
private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException {
return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder());
}
private StateMonitor createStateMonitor() {
return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")),
new SystemTimer());
}
private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException {
return createConfigserverConfig(temporaryFolder, true);
}
private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException {
return createConfigserverConfig(temporaryFolder, false);
}
private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException {
return new ConfigserverConfig(new ConfigserverConfig.Builder()
.configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath())
.configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath())
.hostedVespa(hosted)
.multitenant(hosted)
.maxDurationOfBootstrap(1) /* seconds */
.sleepTimeWhenRedeployingFails(0)); /* seconds */
}
private List<Host> createHosts(String vespaVersion) {
return Arrays.asList(createHost("host1", vespaVersion), createHost("host2", vespaVersion), createHost("host3", vespaVersion));
}
private Host createHost(String hostname, String version) {
return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version)));
}
public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc {
volatile boolean isRunning = false;
MockRpc(int port, File tempDir) {
super(port, tempDir);
}
@Override
public void run() {
isRunning = true;
}
@Override
public void stop() {
isRunning = false;
}
@Override
public boolean isRunning() {
return isRunning;
}
}
} | class ConfigServerBootstrapTest {
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
@Test
public void testBootstrap() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder);
InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3");
DeployTester tester = new DeployTester(configserverConfig, provisioner);
tester.deployApp("src/test/apps/hosted/");
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
provisioner.allocations().values().iterator().next().remove(0);
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer,
versionState, createStateMonitor(), vipStatus,
ConfigServerBootstrap.Mode.INITIALIZE_ONLY);
assertFalse(vipStatus.isInRotation());
bootstrap.start();
waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'");
waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation");
bootstrap.deconstruct();
assertEquals(StateMonitor.Status.down, bootstrap.status());
assertFalse(rpcServer.isRunning());
assertFalse(vipStatus.isInRotation());
}
@Test
public void testBootstrapWhenRedeploymentFails() throws Exception {
ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder);
DeployTester tester = new DeployTester(configserverConfig);
tester.deployApp("src/test/apps/hosted/");
File versionFile = temporaryFolder.newFile();
VersionState versionState = new VersionState(versionFile);
assertTrue(versionState.isUpgraded());
java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir())
.resolve("tenants/")
.resolve(tester.tenant().getName().value())
.resolve("sessions/2/services.xml"));
RpcServer rpcServer = createRpcServer(configserverConfig);
VipStatus vipStatus = new VipStatus();
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
createStateMonitor(), vipStatus,
ConfigServerBootstrap.Mode.INITIALIZE_ONLY);
assertFalse(vipStatus.isInRotation());
bootstrap.start();
assertEquals(StateMonitor.Status.initializing, bootstrap.status());
assertFalse(rpcServer.isRunning());
assertFalse(vipStatus.isInRotation());
bootstrap.deconstruct();
}
@Test
private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException {
Duration timeout = Duration.ofSeconds(60);
Instant endTime = Instant.now().plus(timeout);
while (Instant.now().isBefore(endTime)) {
if (booleanSupplier.getAsBoolean())
return;
Thread.sleep(10);
}
throw new RuntimeException(messageIfWaitingFails);
}
private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException {
return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder());
}
private StateMonitor createStateMonitor() {
return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")),
new SystemTimer());
}
private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException {
return createConfigserverConfig(temporaryFolder, true);
}
private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException {
return createConfigserverConfig(temporaryFolder, false);
}
private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException {
return new ConfigserverConfig(new ConfigserverConfig.Builder()
.configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath())
.configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath())
.hostedVespa(hosted)
.multitenant(hosted)
.maxDurationOfBootstrap(1) /* seconds */
.sleepTimeWhenRedeployingFails(0)); /* seconds */
}
private List<Host> createHosts(String vespaVersion) {
return Arrays.asList(createHost("host1", vespaVersion), createHost("host2", vespaVersion), createHost("host3", vespaVersion));
}
private Host createHost(String hostname, String version) {
return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version)));
}
public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc {
volatile boolean isRunning = false;
MockRpc(int port, File tempDir) {
super(port, tempDir);
}
@Override
public void run() {
isRunning = true;
}
@Override
public void stop() {
isRunning = false;
}
@Override
public boolean isRunning() {
return isRunning;
}
}
} |
Just a Java tip: You can avoid the if clause by passing a closure to fine(). | public Optional<Group> takeGroup(Set<Integer> rejectedGroups) {
synchronized (this) {
Optional<GroupStatus> best = scheduler.takeNextGroup(rejectedGroups);
if (best.isPresent()) {
GroupStatus gs = best.get();
gs.allocate();
Group ret = gs.group;
if (log.isLoggable(Level.FINE)) {
log.fine("Offering <" + ret + "> for query connection");
}
return Optional.of(ret);
} else {
return Optional.empty();
}
}
} | if (log.isLoggable(Level.FINE)) { | public Optional<Group> takeGroup(Set<Integer> rejectedGroups) {
synchronized (this) {
Optional<GroupStatus> best = scheduler.takeNextGroup(rejectedGroups);
if (best.isPresent()) {
GroupStatus gs = best.get();
gs.allocate();
Group ret = gs.group;
log.fine(() -> "Offering <" + ret + "> for query connection");
return Optional.of(ret);
} else {
return Optional.empty();
}
}
} | class LoadBalancer {
private static final Logger log = Logger.getLogger(LoadBalancer.class.getName());
private static final long DEFAULT_LATENCY_DECAY_RATE = 1000;
private static final long MIN_LATENCY_DECAY_RATE = 42;
private static final double INITIAL_QUERY_TIME = 0.001;
private static final double MIN_QUERY_TIME = 0.001;
private final List<GroupStatus> scoreboard;
private final GroupScheduler scheduler;
public LoadBalancer(SearchCluster searchCluster, boolean roundRobin) {
this.scoreboard = new ArrayList<>(searchCluster.groups().size());
for (Group group : searchCluster.orderedGroups()) {
scoreboard.add(new GroupStatus(group));
}
if (roundRobin) {
this.scheduler = new RoundRobinScheduler(scoreboard);
} else {
this.scheduler = new AdaptiveScheduler(new Random(), scoreboard);
}
}
/**
* Select and allocate the search cluster group which is to be used for the next search query. Callers <b>must</b> call
* {@link
*
* @param rejectedGroups if not null, the load balancer will only return groups with IDs not in the set
* @return The node group to target, or <i>empty</i> if the internal dispatch logic cannot be used
*/
/**
* Release an allocation given by {@link
*
* @param group
* previously allocated group
* @param success
* was the query successful
* @param searchTimeMs
* query execution time in milliseconds, used for adaptive load balancing
*/
public void releaseGroup(Group group, boolean success, double searchTimeMs) {
synchronized (this) {
for (GroupStatus sched : scoreboard) {
if (sched.group.id() == group.id()) {
sched.release(success, (double) searchTimeMs / 1000.0);
break;
}
}
}
}
static class GroupStatus {
private final Group group;
private int allocations = 0;
private long queries = 0;
private double averageSearchTime = INITIAL_QUERY_TIME;
GroupStatus(Group group) {
this.group = group;
}
void allocate() {
allocations++;
}
void release(boolean success, double searchTime) {
allocations--;
if (allocations < 0) {
log.warning("Double free of query target group detected");
allocations = 0;
}
if (success) {
searchTime = Math.max(searchTime, MIN_QUERY_TIME);
double decayRate = Math.min(queries + MIN_LATENCY_DECAY_RATE, DEFAULT_LATENCY_DECAY_RATE);
averageSearchTime = (searchTime + (decayRate - 1) * averageSearchTime) / decayRate;
queries++;
}
}
double averageSearchTime() {
return averageSearchTime;
}
double averageSearchTimeInverse() {
return 1.0 / averageSearchTime;
}
int groupId() {
return group.id();
}
void setQueryStatistics(long queries, double averageSearchTime) {
this.queries = queries;
this.averageSearchTime = averageSearchTime;
}
}
private interface GroupScheduler {
Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups);
}
private static class RoundRobinScheduler implements GroupScheduler {
private int needle = 0;
private final List<GroupStatus> scoreboard;
public RoundRobinScheduler(List<GroupStatus> scoreboard) {
this.scoreboard = scoreboard;
}
@Override
public Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups) {
GroupStatus bestCandidate = null;
int bestIndex = needle;
int index = needle;
for (int i = 0; i < scoreboard.size(); i++) {
GroupStatus candidate = scoreboard.get(index);
if (rejectedGroups == null || !rejectedGroups.contains(candidate.group.id())) {
GroupStatus better = betterGroup(bestCandidate, candidate);
if (better == candidate) {
bestCandidate = candidate;
bestIndex = index;
}
}
index = nextScoreboardIndex(index);
}
needle = nextScoreboardIndex(bestIndex);
return Optional.ofNullable(bestCandidate);
}
/**
* Select the better of the two given GroupStatus objects, biased to the first
* parameter. Thus, if all groups have equal coverage sufficiency, the one
* currently at the needle will be used. Either parameter can be null, in which
* case any non-null will be preferred.
*
* @param first preferred GroupStatus
* @param second potentially better GroupStatus
* @return the better of the two
*/
private static GroupStatus betterGroup(GroupStatus first, GroupStatus second) {
if (second == null) {
return first;
}
if (first == null) {
return second;
}
if (first.group.hasSufficientCoverage() != second.group.hasSufficientCoverage()) {
if (!first.group.hasSufficientCoverage()) {
return second;
} else {
return first;
}
}
return first;
}
private int nextScoreboardIndex(int current) {
int next = current + 1;
if (next >= scoreboard.size()) {
next %= scoreboard.size();
}
return next;
}
}
static class AdaptiveScheduler implements GroupScheduler {
private final Random random;
private final List<GroupStatus> scoreboard;
public AdaptiveScheduler(Random random, List<GroupStatus> scoreboard) {
this.random = random;
this.scoreboard = scoreboard;
}
private Optional<GroupStatus> selectGroup(double needle, boolean requireCoverage, Set<Integer> rejected) {
double sum = 0;
int n = 0;
for (GroupStatus gs : scoreboard) {
if (rejected == null || !rejected.contains(gs.group.id())) {
if (!requireCoverage || gs.group.hasSufficientCoverage()) {
sum += gs.averageSearchTimeInverse();
n++;
}
}
}
if (n == 0) {
return Optional.empty();
}
double accum = 0;
for (GroupStatus gs : scoreboard) {
if (rejected == null || !rejected.contains(gs.group.id())) {
if (!requireCoverage || gs.group.hasSufficientCoverage()) {
accum += gs.averageSearchTimeInverse();
if (needle < accum / sum) {
return Optional.of(gs);
}
}
}
}
return Optional.empty();
}
@Override
public Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups) {
double needle = random.nextDouble();
Optional<GroupStatus> gs = selectGroup(needle, true, rejectedGroups);
if (gs.isPresent()) {
return gs;
}
return selectGroup(needle, false, rejectedGroups);
}
}
} | class LoadBalancer {
private static final Logger log = Logger.getLogger(LoadBalancer.class.getName());
private static final long DEFAULT_LATENCY_DECAY_RATE = 1000;
private static final long MIN_LATENCY_DECAY_RATE = 42;
private static final double INITIAL_QUERY_TIME = 0.001;
private static final double MIN_QUERY_TIME = 0.001;
private final List<GroupStatus> scoreboard;
private final GroupScheduler scheduler;
public LoadBalancer(SearchCluster searchCluster, boolean roundRobin) {
this.scoreboard = new ArrayList<>(searchCluster.groups().size());
for (Group group : searchCluster.orderedGroups()) {
scoreboard.add(new GroupStatus(group));
}
if (roundRobin) {
this.scheduler = new RoundRobinScheduler(scoreboard);
} else {
this.scheduler = new AdaptiveScheduler(new Random(), scoreboard);
}
}
/**
* Select and allocate the search cluster group which is to be used for the next search query. Callers <b>must</b> call
* {@link
*
* @param rejectedGroups if not null, the load balancer will only return groups with IDs not in the set
* @return The node group to target, or <i>empty</i> if the internal dispatch logic cannot be used
*/
/**
* Release an allocation given by {@link
*
* @param group
* previously allocated group
* @param success
* was the query successful
* @param searchTimeMs
* query execution time in milliseconds, used for adaptive load balancing
*/
public void releaseGroup(Group group, boolean success, double searchTimeMs) {
synchronized (this) {
for (GroupStatus sched : scoreboard) {
if (sched.group.id() == group.id()) {
sched.release(success, (double) searchTimeMs / 1000.0);
break;
}
}
}
}
static class GroupStatus {
private final Group group;
private int allocations = 0;
private long queries = 0;
private double averageSearchTime = INITIAL_QUERY_TIME;
GroupStatus(Group group) {
this.group = group;
}
void allocate() {
allocations++;
}
void release(boolean success, double searchTime) {
allocations--;
if (allocations < 0) {
log.warning("Double free of query target group detected");
allocations = 0;
}
if (success) {
searchTime = Math.max(searchTime, MIN_QUERY_TIME);
double decayRate = Math.min(queries + MIN_LATENCY_DECAY_RATE, DEFAULT_LATENCY_DECAY_RATE);
averageSearchTime = (searchTime + (decayRate - 1) * averageSearchTime) / decayRate;
queries++;
}
}
double averageSearchTime() {
return averageSearchTime;
}
double averageSearchTimeInverse() {
return 1.0 / averageSearchTime;
}
int groupId() {
return group.id();
}
void setQueryStatistics(long queries, double averageSearchTime) {
this.queries = queries;
this.averageSearchTime = averageSearchTime;
}
}
private interface GroupScheduler {
Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups);
}
private static class RoundRobinScheduler implements GroupScheduler {
private int needle = 0;
private final List<GroupStatus> scoreboard;
public RoundRobinScheduler(List<GroupStatus> scoreboard) {
this.scoreboard = scoreboard;
}
@Override
public Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups) {
GroupStatus bestCandidate = null;
int bestIndex = needle;
int index = needle;
for (int i = 0; i < scoreboard.size(); i++) {
GroupStatus candidate = scoreboard.get(index);
if (rejectedGroups == null || !rejectedGroups.contains(candidate.group.id())) {
GroupStatus better = betterGroup(bestCandidate, candidate);
if (better == candidate) {
bestCandidate = candidate;
bestIndex = index;
}
}
index = nextScoreboardIndex(index);
}
needle = nextScoreboardIndex(bestIndex);
return Optional.ofNullable(bestCandidate);
}
/**
* Select the better of the two given GroupStatus objects, biased to the first
* parameter. Thus, if all groups have equal coverage sufficiency, the one
* currently at the needle will be used. Either parameter can be null, in which
* case any non-null will be preferred.
*
* @param first preferred GroupStatus
* @param second potentially better GroupStatus
* @return the better of the two
*/
private static GroupStatus betterGroup(GroupStatus first, GroupStatus second) {
if (second == null) {
return first;
}
if (first == null) {
return second;
}
if (first.group.hasSufficientCoverage() != second.group.hasSufficientCoverage()) {
if (!first.group.hasSufficientCoverage()) {
return second;
} else {
return first;
}
}
return first;
}
private int nextScoreboardIndex(int current) {
int next = current + 1;
if (next >= scoreboard.size()) {
next %= scoreboard.size();
}
return next;
}
}
static class AdaptiveScheduler implements GroupScheduler {
private final Random random;
private final List<GroupStatus> scoreboard;
public AdaptiveScheduler(Random random, List<GroupStatus> scoreboard) {
this.random = random;
this.scoreboard = scoreboard;
}
private Optional<GroupStatus> selectGroup(double needle, boolean requireCoverage, Set<Integer> rejected) {
double sum = 0;
int n = 0;
for (GroupStatus gs : scoreboard) {
if (rejected == null || !rejected.contains(gs.group.id())) {
if (!requireCoverage || gs.group.hasSufficientCoverage()) {
sum += gs.averageSearchTimeInverse();
n++;
}
}
}
if (n == 0) {
return Optional.empty();
}
double accum = 0;
for (GroupStatus gs : scoreboard) {
if (rejected == null || !rejected.contains(gs.group.id())) {
if (!requireCoverage || gs.group.hasSufficientCoverage()) {
accum += gs.averageSearchTimeInverse();
if (needle < accum / sum) {
return Optional.of(gs);
}
}
}
}
return Optional.empty();
}
@Override
public Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups) {
double needle = random.nextDouble();
Optional<GroupStatus> gs = selectGroup(needle, true, rejectedGroups);
if (gs.isPresent()) {
return gs;
}
return selectGroup(needle, false, rejectedGroups);
}
}
} |
Redundant assert, is asserted below. | public void test() {
MockCurator curator = new MockCurator();
FlagsDbImpl db = new FlagsDbImpl(curator);
Condition condition1 = new Condition(Condition.Type.WHITELIST, FetchVector.Dimension.HOSTNAME, "host1");
Rule rule1 = new Rule(Optional.of(JsonNodeRawFlag.fromJson("13")), condition1);
FlagData data = new FlagData(new FetchVector().with(FetchVector.Dimension.ZONE_ID, "zone-a"), rule1);
FlagId flagId = new FlagId("id");
db.setValue(flagId, data);
assertTrue(db.getValue(flagId).isPresent());
Optional<FlagData> dataCopy = db.getValue(flagId);
assertTrue(dataCopy.isPresent());
assertEquals("{\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\"," +
"\"values\":[\"host1\"]}],\"value\":13}],\"attributes\":{\"zone\":\"zone-a\"}}",
dataCopy.get().serializeToJson());
FlagId flagId2 = new FlagId("id2");
db.setValue(flagId2, data);
Map<FlagId, FlagData> flags = db.getAllFlags();
assertThat(flags.size(), equalTo(2));
assertThat(flags.get(flagId), notNullValue());
assertThat(flags.get(flagId2), notNullValue());
db.removeValue(flagId2);
assertFalse(db.getValue(flagId2).isPresent());
} | assertTrue(db.getValue(flagId).isPresent()); | public void test() {
MockCurator curator = new MockCurator();
FlagsDbImpl db = new FlagsDbImpl(curator);
Condition condition1 = new Condition(Condition.Type.WHITELIST, FetchVector.Dimension.HOSTNAME, "host1");
Rule rule1 = new Rule(Optional.of(JsonNodeRawFlag.fromJson("13")), condition1);
FlagId flagId = new FlagId("id");
FlagData data = new FlagData(flagId, new FetchVector().with(FetchVector.Dimension.ZONE_ID, "zone-a"), rule1);
db.setValue(flagId, data);
Optional<FlagData> dataCopy = db.getValue(flagId);
assertTrue(dataCopy.isPresent());
assertEquals("{\"id\":\"id\",\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\"," +
"\"values\":[\"host1\"]}],\"value\":13}],\"attributes\":{\"zone\":\"zone-a\"}}",
dataCopy.get().serializeToJson());
FlagId flagId2 = new FlagId("id2");
db.setValue(flagId2, data);
Map<FlagId, FlagData> flags = db.getAllFlags();
assertThat(flags.size(), equalTo(2));
assertThat(flags.get(flagId), notNullValue());
assertThat(flags.get(flagId2), notNullValue());
db.removeValue(flagId2);
assertFalse(db.getValue(flagId2).isPresent());
} | class FlagsDbImplTest {
@Test
} | class FlagsDbImplTest {
@Test
} |
Some classes in this package use `HttpConfigResponse.JSON_CONTENT_TYPE` and other use `application/json`, standardize? | public String getContentType() {
return "application/json";
} | return "application/json"; | public String getContentType() {
return HttpConfigResponse.JSON_CONTENT_TYPE;
} | class FlagDataListResponse extends HttpResponse {
private static ObjectMapper mapper = new ObjectMapper();
private final String flagsV1Uri;
private final Map<FlagId, FlagData> flags;
private final boolean showDataInsteadOfUrl;
public FlagDataListResponse(String flagsV1Uri, Map<FlagId, FlagData> flags, boolean showDataInsteadOfUrl) {
super(Response.Status.OK);
this.flagsV1Uri = flagsV1Uri;
this.flags = flags;
this.showDataInsteadOfUrl = showDataInsteadOfUrl;
}
@Override
public void render(OutputStream outputStream) {
ObjectNode rootNode = mapper.createObjectNode();
new TreeMap<>(flags).forEach((flagId, flagData) -> {
if (showDataInsteadOfUrl) {
rootNode.set(flagId.toString(), flagData.toJsonNode());
} else {
rootNode.putObject(flagId.toString()).put("url", flagsV1Uri + "/data/" + flagId.toString());
}
});
uncheck(() -> mapper.writeValue(outputStream, rootNode));
}
@Override
} | class FlagDataListResponse extends HttpResponse {
private static ObjectMapper mapper = new ObjectMapper();
private final String flagsV1Uri;
private final Map<FlagId, FlagData> flags;
private final boolean recursive;
public FlagDataListResponse(String flagsV1Uri, Map<FlagId, FlagData> flags, boolean recursive) {
super(Response.Status.OK);
this.flagsV1Uri = flagsV1Uri;
this.flags = flags;
this.recursive = recursive;
}
@Override
public void render(OutputStream outputStream) {
ObjectNode rootNode = mapper.createObjectNode();
ArrayNode flagsArray = rootNode.putArray("flags");
new TreeMap<>(this.flags).forEach((flagId, flagData) -> {
if (recursive) {
flagsArray.add(flagData.toJsonNode());
} else {
ObjectNode object = flagsArray.addObject();
object.put("id", flagId.toString());
object.put("url", flagsV1Uri + "/data/" + flagId.toString());
}
});
uncheck(() -> mapper.writeValue(outputStream, rootNode));
}
@Override
} |
For the future, you can use the log method that takes a supplier. Then the log message will only be created of the log level is enabled: ``` log.log(DEBUG, () -> msg) ``` | private void doHandle(JRTConfigSubscription<ConfigInstance> sub, JRTClientConfigRequest jrtReq, Connection connection) {
boolean validResponse = jrtReq.validateResponse();
if (log.isLoggable(LogLevel.DEBUG)) {
log.log(LogLevel.DEBUG, "Request callback " +(validResponse ? "valid" : "invalid") + ". Req: " + jrtReq + "\nSpec: " + connection);
}
if (sub.getState() == ConfigSubscription.State.CLOSED) return;
Trace trace = jrtReq.getResponseTrace();
trace.trace(TRACELEVEL, "JRTConfigRequester.doHandle()");
if (log.isLoggable(LogLevel.SPAM)) {
log.log(LogLevel.SPAM, trace.toString());
}
if (validResponse) {
handleOKRequest(jrtReq, sub, connection);
} else {
logWhenErrorResponse(jrtReq, connection);
handleFailedRequest(jrtReq, sub, connection);
}
} | if (log.isLoggable(LogLevel.DEBUG)) { | private void doHandle(JRTConfigSubscription<ConfigInstance> sub, JRTClientConfigRequest jrtReq, Connection connection) {
boolean validResponse = jrtReq.validateResponse();
if (log.isLoggable(LogLevel.DEBUG)) {
log.log(LogLevel.DEBUG, "Request callback " +(validResponse ? "valid" : "invalid") + ". Req: " + jrtReq + "\nSpec: " + connection);
}
if (sub.getState() == ConfigSubscription.State.CLOSED) return;
Trace trace = jrtReq.getResponseTrace();
trace.trace(TRACELEVEL, "JRTConfigRequester.doHandle()");
if (log.isLoggable(LogLevel.SPAM)) {
log.log(LogLevel.SPAM, trace.toString());
}
if (validResponse) {
handleOKRequest(jrtReq, sub, connection);
} else {
logWhenErrorResponse(jrtReq, connection);
handleFailedRequest(jrtReq, sub, connection);
}
} | class JRTConfigRequester implements RequestWaiter {
private static final Logger log = Logger.getLogger(JRTConfigRequester.class.getName());
public static final ConfigSourceSet defaultSourceSet = ConfigSourceSet.createDefault();
private static final int TRACELEVEL = 6;
private final TimingValues timingValues;
private int fatalFailures = 0;
private int transientFailures = 0;
private final ScheduledThreadPoolExecutor scheduler = new ScheduledThreadPoolExecutor(1, new JRTSourceThreadFactory());
private Instant suspendWarningLogged = Instant.MIN;
private Instant noApplicationWarningLogged = Instant.MIN;
private static final Duration delayBetweenWarnings = Duration.ofSeconds(60);
private final ConnectionPool connectionPool;
static final float randomFraction = 0.2f;
/* Time to be added to server timeout to create client timeout. This is the time allowed for the server to respond after serverTimeout has elapsed. */
private static final Double additionalTimeForClientTimeout = 5.0;
private static final SimpleDateFormat yyyyMMddz;
static {
yyyyMMddz = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
yyyyMMddz.setTimeZone(TimeZone.getTimeZone("GMT"));
}
/**
* Returns a new requester
* @param connectionPool The connectionPool to use
* @param timingValues The timing values
* @return new requester object
*/
public static JRTConfigRequester get(ConnectionPool connectionPool, TimingValues timingValues) {
return new JRTConfigRequester(connectionPool, timingValues);
}
/**
* New requester
* @param connectionPool the connectionPool this requester should use
* @param timingValues timeouts and delays used when sending JRT config requests
*/
JRTConfigRequester(ConnectionPool connectionPool, TimingValues timingValues) {
this.connectionPool = connectionPool;
this.timingValues = timingValues;
}
/**
* Requests the config for the {@link com.yahoo.config.ConfigInstance} on the given {@link ConfigSubscription}
*
* @param sub a subscription
*/
public <T extends ConfigInstance> void request(JRTConfigSubscription<T> sub) {
JRTClientConfigRequest req = JRTConfigRequestFactory.createFromSub(sub);
doRequest(sub, req, timingValues.getSubscribeTimeout());
}
private <T extends ConfigInstance> void doRequest(JRTConfigSubscription<T> sub,
JRTClientConfigRequest req, long timeout) {
com.yahoo.vespa.config.Connection connection = connectionPool.getCurrent();
req.getRequest().setContext(new RequestContext(sub, req, connection));
boolean reqOK = req.validateParameters();
if (!reqOK) throw new ConfigurationRuntimeException("Error in parameters for config request: " + req);
double jrtClientTimeout = getClientTimeout(timeout);
if (log.isLoggable(LogLevel.DEBUG)) {
log.log(LogLevel.DEBUG, "Requesting config for " + sub + " on connection " + connection
+ " with RPC timeout " + jrtClientTimeout +
(log.isLoggable(LogLevel.SPAM) ? (",defcontent=" + req.getDefContent().asString()) : ""));
}
connection.invokeAsync(req.getRequest(), jrtClientTimeout, this);
}
@SuppressWarnings("unchecked")
@Override
public void handleRequestDone(Request req) {
JRTConfigSubscription<ConfigInstance> sub = null;
try {
RequestContext context = (RequestContext) req.getContext();
sub = context.sub;
doHandle(sub, context.jrtReq, context.connection);
} catch (RuntimeException e) {
if (sub != null) {
sub.setException(e);
} else {
log.log(Level.SEVERE, "Failed to get subscription object from JRT config callback: " +
Exceptions.toMessageString(e));
}
}
}
private void logWhenErrorResponse(JRTClientConfigRequest jrtReq, Connection connection) {
switch (jrtReq.errorCode()) {
case com.yahoo.jrt.ErrorCode.CONNECTION:
log.log(LogLevel.DEBUG, "Request callback failed: " + jrtReq.errorMessage() +
"\nConnection spec: " + connection);
break;
case ErrorCode.APPLICATION_NOT_LOADED:
case ErrorCode.UNKNOWN_VESPA_VERSION:
if (noApplicationWarningLogged.isBefore(Instant.now().minus(delayBetweenWarnings))) {
log.log(LogLevel.WARNING, "Request callback failed: " + ErrorCode.getName(jrtReq.errorCode()) +
". Connection spec: " + connection.getAddress() +
", error message: " + jrtReq.errorMessage());
noApplicationWarningLogged = Instant.now();
}
break;
default:
log.log(LogLevel.WARNING, "Request callback failed. Req: " + jrtReq + "\nSpec: " + connection.getAddress() +
" . Req error message: " + jrtReq.errorMessage());
break;
}
}
private void handleFailedRequest(JRTClientConfigRequest jrtReq, JRTConfigSubscription<ConfigInstance> sub, Connection connection) {
final boolean configured = (sub.getConfigState().getConfig() != null);
if (configured) {
log.log(LogLevel.INFO, "Failure of config subscription, clients will keep existing config until resolved: " + sub);
}
final ErrorType errorType = ErrorType.getErrorType(jrtReq.errorCode());
connectionPool.setError(connection, jrtReq.errorCode());
long delay = calculateFailedRequestDelay(errorType, transientFailures, fatalFailures, timingValues, configured);
if (errorType == ErrorType.TRANSIENT) {
handleTransientlyFailed(jrtReq, sub, delay, connection);
} else {
handleFatallyFailed(jrtReq, sub, delay);
}
}
static long calculateFailedRequestDelay(ErrorType errorCode, int transientFailures, int fatalFailures,
TimingValues timingValues, boolean configured) {
long delay;
if (configured)
delay = timingValues.getConfiguredErrorDelay();
else
delay = timingValues.getUnconfiguredDelay();
if (errorCode == ErrorType.TRANSIENT) {
delay = delay * Math.min((transientFailures + 1), timingValues.getMaxDelayMultiplier());
} else {
delay = timingValues.getFixedDelay() + (delay * Math.min(fatalFailures, timingValues.getMaxDelayMultiplier()));
delay = timingValues.getPlusMinusFractionRandom(delay, randomFraction);
}
return delay;
}
private void handleTransientlyFailed(JRTClientConfigRequest jrtReq,
JRTConfigSubscription<ConfigInstance> sub,
long delay,
Connection connection) {
transientFailures++;
if (suspendWarningLogged.isBefore(Instant.now().minus(delayBetweenWarnings))) {
log.log(LogLevel.INFO, "Connection to " + connection.getAddress() +
" failed or timed out, clients will keep existing config, will keep trying.");
suspendWarningLogged = Instant.now();
}
if (sub.getState() != ConfigSubscription.State.OPEN) return;
scheduleNextRequest(jrtReq, sub, delay, calculateErrorTimeout());
}
private long calculateErrorTimeout() {
return timingValues.getPlusMinusFractionRandom(timingValues.getErrorTimeout(), randomFraction);
}
/**
* This handles a fatal error both in the case that the subscriber is configured and not.
* The difference is in the delay (passed from outside) and the log level used for
* error message.
*
* @param jrtReq a JRT config request
* @param sub a config subscription
* @param delay delay before sending a new request
*/
private void handleFatallyFailed(JRTClientConfigRequest jrtReq,
JRTConfigSubscription<ConfigInstance> sub, long delay) {
if (sub.getState() != ConfigSubscription.State.OPEN) return;
fatalFailures++;
Level logLevel = sub.getConfigState().getConfig() == null ? LogLevel.DEBUG : LogLevel.INFO;
String logMessage = "Request for config " + jrtReq.getShortDescription() + "' failed with error code " +
jrtReq.errorCode() + " (" + jrtReq.errorMessage() + "), scheduling new connect " +
" in " + delay + " ms";
log.log(logLevel, logMessage);
scheduleNextRequest(jrtReq, sub, delay, calculateErrorTimeout());
}
private void handleOKRequest(JRTClientConfigRequest jrtReq,
JRTConfigSubscription<ConfigInstance> sub,
Connection connection) {
fatalFailures = 0;
transientFailures = 0;
suspendWarningLogged = Instant.MIN;
noApplicationWarningLogged = Instant.MIN;
connection.setSuccess();
sub.setLastCallBackOKTS(System.currentTimeMillis());
if (jrtReq.hasUpdatedGeneration()) {
sub.getReqQueue().clear();
boolean putOK = sub.getReqQueue().offer(jrtReq);
if (!putOK) {
sub.setException(new ConfigurationRuntimeException("Could not put returned request on queue of subscription " + sub));
}
}
if (sub.getState() != ConfigSubscription.State.OPEN) return;
scheduleNextRequest(jrtReq, sub, calculateSuccessDelay(), calculateSuccessTimeout());
}
private long calculateSuccessTimeout() {
return timingValues.getPlusMinusFractionRandom(timingValues.getSuccessTimeout(), randomFraction);
}
private long calculateSuccessDelay() {
return timingValues.getPlusMinusFractionRandom(timingValues.getFixedDelay(), randomFraction);
}
private void scheduleNextRequest(JRTClientConfigRequest jrtReq, JRTConfigSubscription<?> sub, long delay, long timeout) {
if (delay < 0) delay = 0;
JRTClientConfigRequest jrtReqNew = jrtReq.nextRequest(timeout);
if (log.isLoggable(LogLevel.DEBUG)) {
log.log(LogLevel.DEBUG, "My timing values: " + timingValues);
log.log(LogLevel.DEBUG, "Scheduling new request " + delay + " millis from now for " + jrtReqNew.getConfigKey());
}
scheduler.schedule(new GetConfigTask(jrtReqNew, sub), delay, TimeUnit.MILLISECONDS);
}
/**
* Task that can be scheduled in a timer for executing a getConfig request
*/
private class GetConfigTask implements Runnable {
private final JRTClientConfigRequest jrtReq;
private final JRTConfigSubscription<?> sub;
GetConfigTask(JRTClientConfigRequest jrtReq, JRTConfigSubscription<?> sub) {
this.jrtReq = jrtReq;
this.sub = sub;
}
public void run() {
doRequest(sub, jrtReq, jrtReq.getTimeout());
}
}
public void close() {
suspendWarningLogged = Instant.now();
noApplicationWarningLogged = Instant.now();
connectionPool.close();
scheduler.shutdown();
}
private class JRTSourceThreadFactory implements ThreadFactory {
@SuppressWarnings("NullableProblems")
@Override
public Thread newThread(Runnable runnable) {
ThreadFactory tf = Executors.defaultThreadFactory();
Thread t = tf.newThread(runnable);
t.setDaemon(true);
return t;
}
}
@SuppressWarnings("rawtypes")
private static class RequestContext {
final JRTConfigSubscription sub;
final JRTClientConfigRequest jrtReq;
final Connection connection;
private RequestContext(JRTConfigSubscription sub, JRTClientConfigRequest jrtReq, Connection connection) {
this.sub = sub;
this.jrtReq = jrtReq;
this.connection = connection;
}
}
int getTransientFailures() {
return transientFailures;
}
int getFatalFailures() {
return fatalFailures;
}
public ConnectionPool getConnectionPool() {
return connectionPool;
}
private Double getClientTimeout(long serverTimeout) {
return (serverTimeout / 1000.0) + additionalTimeForClientTimeout;
}
} | class JRTConfigRequester implements RequestWaiter {
private static final Logger log = Logger.getLogger(JRTConfigRequester.class.getName());
public static final ConfigSourceSet defaultSourceSet = ConfigSourceSet.createDefault();
private static final int TRACELEVEL = 6;
private final TimingValues timingValues;
private int fatalFailures = 0;
private int transientFailures = 0;
private final ScheduledThreadPoolExecutor scheduler = new ScheduledThreadPoolExecutor(1, new JRTSourceThreadFactory());
private Instant suspendWarningLogged = Instant.MIN;
private Instant noApplicationWarningLogged = Instant.MIN;
private static final Duration delayBetweenWarnings = Duration.ofSeconds(60);
private final ConnectionPool connectionPool;
static final float randomFraction = 0.2f;
/* Time to be added to server timeout to create client timeout. This is the time allowed for the server to respond after serverTimeout has elapsed. */
private static final Double additionalTimeForClientTimeout = 5.0;
private static final SimpleDateFormat yyyyMMddz;
static {
yyyyMMddz = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
yyyyMMddz.setTimeZone(TimeZone.getTimeZone("GMT"));
}
/**
* Returns a new requester
* @param connectionPool The connectionPool to use
* @param timingValues The timing values
* @return new requester object
*/
public static JRTConfigRequester get(ConnectionPool connectionPool, TimingValues timingValues) {
return new JRTConfigRequester(connectionPool, timingValues);
}
/**
* New requester
* @param connectionPool the connectionPool this requester should use
* @param timingValues timeouts and delays used when sending JRT config requests
*/
JRTConfigRequester(ConnectionPool connectionPool, TimingValues timingValues) {
this.connectionPool = connectionPool;
this.timingValues = timingValues;
}
/**
* Requests the config for the {@link com.yahoo.config.ConfigInstance} on the given {@link ConfigSubscription}
*
* @param sub a subscription
*/
public <T extends ConfigInstance> void request(JRTConfigSubscription<T> sub) {
JRTClientConfigRequest req = JRTConfigRequestFactory.createFromSub(sub);
doRequest(sub, req, timingValues.getSubscribeTimeout());
}
private <T extends ConfigInstance> void doRequest(JRTConfigSubscription<T> sub,
JRTClientConfigRequest req, long timeout) {
com.yahoo.vespa.config.Connection connection = connectionPool.getCurrent();
req.getRequest().setContext(new RequestContext(sub, req, connection));
boolean reqOK = req.validateParameters();
if (!reqOK) throw new ConfigurationRuntimeException("Error in parameters for config request: " + req);
double jrtClientTimeout = getClientTimeout(timeout);
if (log.isLoggable(LogLevel.DEBUG)) {
log.log(LogLevel.DEBUG, "Requesting config for " + sub + " on connection " + connection
+ " with RPC timeout " + jrtClientTimeout +
(log.isLoggable(LogLevel.SPAM) ? (",defcontent=" + req.getDefContent().asString()) : ""));
}
connection.invokeAsync(req.getRequest(), jrtClientTimeout, this);
}
@SuppressWarnings("unchecked")
@Override
public void handleRequestDone(Request req) {
JRTConfigSubscription<ConfigInstance> sub = null;
try {
RequestContext context = (RequestContext) req.getContext();
sub = context.sub;
doHandle(sub, context.jrtReq, context.connection);
} catch (RuntimeException e) {
if (sub != null) {
sub.setException(e);
} else {
log.log(Level.SEVERE, "Failed to get subscription object from JRT config callback: " +
Exceptions.toMessageString(e));
}
}
}
private void logWhenErrorResponse(JRTClientConfigRequest jrtReq, Connection connection) {
switch (jrtReq.errorCode()) {
case com.yahoo.jrt.ErrorCode.CONNECTION:
log.log(LogLevel.DEBUG, "Request callback failed: " + jrtReq.errorMessage() +
"\nConnection spec: " + connection);
break;
case ErrorCode.APPLICATION_NOT_LOADED:
case ErrorCode.UNKNOWN_VESPA_VERSION:
if (noApplicationWarningLogged.isBefore(Instant.now().minus(delayBetweenWarnings))) {
log.log(LogLevel.WARNING, "Request callback failed: " + ErrorCode.getName(jrtReq.errorCode()) +
". Connection spec: " + connection.getAddress() +
", error message: " + jrtReq.errorMessage());
noApplicationWarningLogged = Instant.now();
}
break;
default:
log.log(LogLevel.WARNING, "Request callback failed. Req: " + jrtReq + "\nSpec: " + connection.getAddress() +
" . Req error message: " + jrtReq.errorMessage());
break;
}
}
private void handleFailedRequest(JRTClientConfigRequest jrtReq, JRTConfigSubscription<ConfigInstance> sub, Connection connection) {
final boolean configured = (sub.getConfigState().getConfig() != null);
if (configured) {
log.log(LogLevel.INFO, "Failure of config subscription, clients will keep existing config until resolved: " + sub);
}
final ErrorType errorType = ErrorType.getErrorType(jrtReq.errorCode());
connectionPool.setError(connection, jrtReq.errorCode());
long delay = calculateFailedRequestDelay(errorType, transientFailures, fatalFailures, timingValues, configured);
if (errorType == ErrorType.TRANSIENT) {
handleTransientlyFailed(jrtReq, sub, delay, connection);
} else {
handleFatallyFailed(jrtReq, sub, delay);
}
}
static long calculateFailedRequestDelay(ErrorType errorCode, int transientFailures, int fatalFailures,
TimingValues timingValues, boolean configured) {
long delay;
if (configured)
delay = timingValues.getConfiguredErrorDelay();
else
delay = timingValues.getUnconfiguredDelay();
if (errorCode == ErrorType.TRANSIENT) {
delay = delay * Math.min((transientFailures + 1), timingValues.getMaxDelayMultiplier());
} else {
delay = timingValues.getFixedDelay() + (delay * Math.min(fatalFailures, timingValues.getMaxDelayMultiplier()));
delay = timingValues.getPlusMinusFractionRandom(delay, randomFraction);
}
return delay;
}
private void handleTransientlyFailed(JRTClientConfigRequest jrtReq,
JRTConfigSubscription<ConfigInstance> sub,
long delay,
Connection connection) {
transientFailures++;
if (suspendWarningLogged.isBefore(Instant.now().minus(delayBetweenWarnings))) {
log.log(LogLevel.INFO, "Connection to " + connection.getAddress() +
" failed or timed out, clients will keep existing config, will keep trying.");
suspendWarningLogged = Instant.now();
}
if (sub.getState() != ConfigSubscription.State.OPEN) return;
scheduleNextRequest(jrtReq, sub, delay, calculateErrorTimeout());
}
private long calculateErrorTimeout() {
return timingValues.getPlusMinusFractionRandom(timingValues.getErrorTimeout(), randomFraction);
}
/**
* This handles a fatal error both in the case that the subscriber is configured and not.
* The difference is in the delay (passed from outside) and the log level used for
* error message.
*
* @param jrtReq a JRT config request
* @param sub a config subscription
* @param delay delay before sending a new request
*/
private void handleFatallyFailed(JRTClientConfigRequest jrtReq,
JRTConfigSubscription<ConfigInstance> sub, long delay) {
if (sub.getState() != ConfigSubscription.State.OPEN) return;
fatalFailures++;
Level logLevel = sub.getConfigState().getConfig() == null ? LogLevel.DEBUG : LogLevel.INFO;
String logMessage = "Request for config " + jrtReq.getShortDescription() + "' failed with error code " +
jrtReq.errorCode() + " (" + jrtReq.errorMessage() + "), scheduling new connect " +
" in " + delay + " ms";
log.log(logLevel, logMessage);
scheduleNextRequest(jrtReq, sub, delay, calculateErrorTimeout());
}
private void handleOKRequest(JRTClientConfigRequest jrtReq,
JRTConfigSubscription<ConfigInstance> sub,
Connection connection) {
fatalFailures = 0;
transientFailures = 0;
suspendWarningLogged = Instant.MIN;
noApplicationWarningLogged = Instant.MIN;
connection.setSuccess();
sub.setLastCallBackOKTS(System.currentTimeMillis());
if (jrtReq.hasUpdatedGeneration()) {
sub.getReqQueue().clear();
boolean putOK = sub.getReqQueue().offer(jrtReq);
if (!putOK) {
sub.setException(new ConfigurationRuntimeException("Could not put returned request on queue of subscription " + sub));
}
}
if (sub.getState() != ConfigSubscription.State.OPEN) return;
scheduleNextRequest(jrtReq, sub, calculateSuccessDelay(), calculateSuccessTimeout());
}
private long calculateSuccessTimeout() {
return timingValues.getPlusMinusFractionRandom(timingValues.getSuccessTimeout(), randomFraction);
}
private long calculateSuccessDelay() {
return timingValues.getPlusMinusFractionRandom(timingValues.getFixedDelay(), randomFraction);
}
private void scheduleNextRequest(JRTClientConfigRequest jrtReq, JRTConfigSubscription<?> sub, long delay, long timeout) {
if (delay < 0) delay = 0;
JRTClientConfigRequest jrtReqNew = jrtReq.nextRequest(timeout);
if (log.isLoggable(LogLevel.DEBUG)) {
log.log(LogLevel.DEBUG, "My timing values: " + timingValues);
log.log(LogLevel.DEBUG, "Scheduling new request " + delay + " millis from now for " + jrtReqNew.getConfigKey());
}
scheduler.schedule(new GetConfigTask(jrtReqNew, sub), delay, TimeUnit.MILLISECONDS);
}
/**
* Task that can be scheduled in a timer for executing a getConfig request
*/
private class GetConfigTask implements Runnable {
private final JRTClientConfigRequest jrtReq;
private final JRTConfigSubscription<?> sub;
GetConfigTask(JRTClientConfigRequest jrtReq, JRTConfigSubscription<?> sub) {
this.jrtReq = jrtReq;
this.sub = sub;
}
public void run() {
doRequest(sub, jrtReq, jrtReq.getTimeout());
}
}
public void close() {
suspendWarningLogged = Instant.now();
noApplicationWarningLogged = Instant.now();
connectionPool.close();
scheduler.shutdown();
}
private class JRTSourceThreadFactory implements ThreadFactory {
@SuppressWarnings("NullableProblems")
@Override
public Thread newThread(Runnable runnable) {
ThreadFactory tf = Executors.defaultThreadFactory();
Thread t = tf.newThread(runnable);
t.setDaemon(true);
return t;
}
}
@SuppressWarnings("rawtypes")
private static class RequestContext {
final JRTConfigSubscription sub;
final JRTClientConfigRequest jrtReq;
final Connection connection;
private RequestContext(JRTConfigSubscription sub, JRTClientConfigRequest jrtReq, Connection connection) {
this.sub = sub;
this.jrtReq = jrtReq;
this.connection = connection;
}
}
int getTransientFailures() {
return transientFailures;
}
int getFatalFailures() {
return fatalFailures;
}
public ConnectionPool getConnectionPool() {
return connectionPool;
}
private Double getClientTimeout(long serverTimeout) {
return (serverTimeout / 1000.0) + additionalTimeForClientTimeout;
}
} |
Why not just return `OKResponse`? | private HttpResponse putFlagData(HttpRequest request, FlagId flagId) {
flagsDb.setValue(flagId, FlagData.deserialize(request.getData()));
return getFlagData(flagId);
} | return getFlagData(flagId); | private HttpResponse putFlagData(HttpRequest request, FlagId flagId) {
flagsDb.setValue(flagId, FlagData.deserialize(request.getData()));
return new OKResponse();
} | class FlagsHandler extends HttpHandler {
private final FlagsDb flagsDb;
@Inject
public FlagsHandler(LoggingRequestHandler.Context context, FlagsDb flagsDb) {
super(context);
this.flagsDb = flagsDb;
}
@Override
protected HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1")) return new V1Response(flagsV1Uri(request), "data", "defined");
if (path.matches("/flags/v1/data")) return getFlagDataList(request);
if (path.matches("/flags/v1/data/{flagId}")) return getFlagData(findFlagId(request, path));
if (path.matches("/flags/v1/defined")) return getDefinedFlagList(request);
throw new NotFoundException("Nothing at path '" + path + "'");
}
@Override
protected HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1/data/{flagId}")) return putFlagData(request, findFlagId(request, path));
throw new NotFoundException("Nothing at path '" + path + "'");
}
@Override
protected HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1/data/{flagId}")) return deleteFlagData(findFlagId(request, path));
throw new NotFoundException("Nothing at path '" + path + "'");
}
private HttpResponse getDefinedFlagList(HttpRequest request) {
return new DefinedFlags(Flags.getAllFlags());
}
private String flagsV1Uri(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
private HttpResponse getFlagDataList(HttpRequest request) {
return new FlagDataListResponse(flagsV1Uri(request), flagsDb.getAllFlags(),
Objects.equals(request.getProperty("recursive"), "true"));
}
private HttpResponse getFlagData(FlagId flagId) {
FlagData data = flagsDb.getValue(flagId).orElseThrow(() -> new NotFoundException("Flag " + flagId + " not set"));
return new FlagDataResponse(data);
}
private HttpResponse deleteFlagData(FlagId flagId) {
flagsDb.removeValue(flagId);
return new OKResponse();
}
private FlagId findFlagId(HttpRequest request, Path path) {
FlagId flagId = new FlagId(path.get("flagId"));
if (!Objects.equals(request.getProperty("force"), "true")) {
if (Flags.getAllFlags().stream().noneMatch(definition -> flagId.equals(definition.getUnboundFlag().id()))) {
throw new NotFoundException("There is no flag '" + flagId + "' (use ?force=true to override)");
}
}
return flagId;
}
} | class FlagsHandler extends HttpHandler {
private final FlagsDb flagsDb;
@Inject
public FlagsHandler(LoggingRequestHandler.Context context, FlagsDb flagsDb) {
super(context);
this.flagsDb = flagsDb;
}
@Override
protected HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1")) return new V1Response(flagsV1Uri(request), "data", "defined");
if (path.matches("/flags/v1/data")) return getFlagDataList(request);
if (path.matches("/flags/v1/data/{flagId}")) return getFlagData(findFlagId(request, path));
if (path.matches("/flags/v1/defined")) return getDefinedFlagList(request);
throw new NotFoundException("Nothing at path '" + path + "'");
}
@Override
protected HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1/data/{flagId}")) return putFlagData(request, findFlagId(request, path));
throw new NotFoundException("Nothing at path '" + path + "'");
}
@Override
protected HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1/data/{flagId}")) return deleteFlagData(findFlagId(request, path));
throw new NotFoundException("Nothing at path '" + path + "'");
}
private HttpResponse getDefinedFlagList(HttpRequest request) {
return new DefinedFlags(Flags.getAllFlags());
}
private String flagsV1Uri(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
private HttpResponse getFlagDataList(HttpRequest request) {
return new FlagDataListResponse(flagsV1Uri(request), flagsDb.getAllFlags(),
Objects.equals(request.getProperty("recursive"), "true"));
}
private HttpResponse getFlagData(FlagId flagId) {
FlagData data = flagsDb.getValue(flagId).orElseThrow(() -> new NotFoundException("Flag " + flagId + " not set"));
return new FlagDataResponse(data);
}
private HttpResponse deleteFlagData(FlagId flagId) {
flagsDb.removeValue(flagId);
return new OKResponse();
}
private FlagId findFlagId(HttpRequest request, Path path) {
FlagId flagId = new FlagId(path.get("flagId"));
if (!Objects.equals(request.getProperty("force"), "true")) {
if (Flags.getAllFlags().stream().noneMatch(definition -> flagId.equals(definition.getUnboundFlag().id()))) {
throw new NotFoundException("There is no flag '" + flagId + "' (use ?force=true to override)");
}
}
return flagId;
}
} |
This should include the ID of the flag, otherwise it's difficult to work with on the different clients that will use this. Then, then recursive data list can just be an array instead of an object. | public void testData() {
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
"{\n" +
" \"rules\": [\n" +
" {\n" +
" \"value\": true\n" +
" }\n" +
" ]\n" +
"}",
"{\"rules\":[{\"value\":true}]}");
verifySuccessfulRequest(Method.GET, "/data/" + FLAG1.id(),
"", "{\"rules\":[{\"value\":true}]}");
verifySuccessfulRequest(Method.GET, "/data",
"", "{\"id1\":{\"url\":\"https:
verifySuccessfulRequest(Method.GET, "/data/",
"", "{\"id1\":{\"url\":\"https:
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG2.id(),
"{\n" +
" \"rules\": [\n" +
" {\n" +
" \"conditions\": [\n" +
" {\n" +
" \"type\": \"whitelist\",\n" +
" \"dimension\": \"hostname\",\n" +
" \"values\": [ \"host1\", \"host2\" ]\n" +
" },\n" +
" {\n" +
" \"type\": \"blacklist\",\n" +
" \"dimension\": \"application\",\n" +
" \"values\": [ \"app1\", \"app2\" ]\n" +
" }\n" +
" ],\n" +
" \"value\": true\n" +
" }\n" +
" ],\n" +
" \"attributes\": {\n" +
" \"zone\": \"zone1\"\n" +
" }\n" +
"}\n",
"{\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"host1\",\"host2\"]},{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"app2\",\"app1\"]}],\"value\":true}],\"attributes\":{\"zone\":\"zone1\"}}");
verifySuccessfulRequest(Method.GET, "/data",
"",
"{\"id1\":{\"url\":\"https:
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
"{\n" +
" \"rules\": [\n" +
" {\n" +
" \"value\": false\n" +
" }\n" +
" ]\n" +
"}\n",
"{\"rules\":[{\"value\":false}]}");
verifySuccessfulRequest(Method.GET, "/data?recursive=true", "",
"{\"id1\":{\"rules\":[{\"value\":false}]},\"id2\":{\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"host1\",\"host2\"]},{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"app2\",\"app1\"]}],\"value\":true}],\"attributes\":{\"zone\":\"zone1\"}}}");
verifySuccessfulRequest(Method.DELETE, "/data/" + FLAG1.id(), "", "");
verifySuccessfulRequest(Method.DELETE, "/data/" + FLAG2.id(), "", "");
verifySuccessfulRequest(Method.GET, "/data", "", "{}");
} | "", "{\"rules\":[{\"value\":true}]}"); | public void testData() {
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
"{\n" +
" \"id\": \"id1\",\n" +
" \"rules\": [\n" +
" {\n" +
" \"value\": true\n" +
" }\n" +
" ]\n" +
"}",
"");
verifySuccessfulRequest(Method.GET, "/data/" + FLAG1.id(),
"", "{\"id\":\"id1\",\"rules\":[{\"value\":true}]}");
verifySuccessfulRequest(Method.GET, "/data",
"", "{\"flags\":[{\"id\":\"id1\",\"url\":\"https:
verifySuccessfulRequest(Method.GET, "/data/",
"", "{\"flags\":[{\"id\":\"id1\",\"url\":\"https:
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG2.id(),
"{\n" +
" \"id\": \"id2\",\n" +
" \"rules\": [\n" +
" {\n" +
" \"conditions\": [\n" +
" {\n" +
" \"type\": \"whitelist\",\n" +
" \"dimension\": \"hostname\",\n" +
" \"values\": [ \"host1\", \"host2\" ]\n" +
" },\n" +
" {\n" +
" \"type\": \"blacklist\",\n" +
" \"dimension\": \"application\",\n" +
" \"values\": [ \"app1\", \"app2\" ]\n" +
" }\n" +
" ],\n" +
" \"value\": true\n" +
" }\n" +
" ],\n" +
" \"attributes\": {\n" +
" \"zone\": \"zone1\"\n" +
" }\n" +
"}\n",
"");
verifySuccessfulRequest(Method.GET, "/data/" + FLAG2.id(), "",
"{\"id\":\"id2\",\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"host1\",\"host2\"]},{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"app2\",\"app1\"]}],\"value\":true}],\"attributes\":{\"zone\":\"zone1\"}}");
verifySuccessfulRequest(Method.GET, "/data",
"",
"{\"flags\":[{\"id\":\"id1\",\"url\":\"https:
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
"{\n" +
" \"id\": \"id1\",\n" +
" \"rules\": [\n" +
" {\n" +
" \"value\": false\n" +
" }\n" +
" ]\n" +
"}\n",
"");
verifySuccessfulRequest(Method.GET, "/data/" + FLAG1.id(), "", "{\"id\":\"id1\",\"rules\":[{\"value\":false}]}");
verifySuccessfulRequest(Method.GET, "/data?recursive=true", "",
"{\"flags\":[{\"id\":\"id1\",\"rules\":[{\"value\":false}]},{\"id\":\"id2\",\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"host1\",\"host2\"]},{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"app2\",\"app1\"]}],\"value\":true}],\"attributes\":{\"zone\":\"zone1\"}}]}");
verifySuccessfulRequest(Method.DELETE, "/data/" + FLAG1.id(), "", "");
verifySuccessfulRequest(Method.DELETE, "/data/" + FLAG2.id(), "", "");
verifySuccessfulRequest(Method.GET, "/data", "", "{\"flags\":[]}");
} | class FlagsHandlerTest {
private static final UnboundFlag<Boolean> FLAG1 =
Flags.defineBoolean("id1", false, "desc1", "mod1");
private static final UnboundFlag<Boolean> FLAG2 =
Flags.defineBoolean("id2", true, "desc2", "mod2",
FetchVector.Dimension.HOSTNAME, FetchVector.Dimension.APPLICATION_ID);
private static final String FLAGS_V1_URL = "https:
private final FlagsDbImpl flagsDb = new FlagsDbImpl(new MockCurator());
private final FlagsHandler handler = new FlagsHandler(FlagsHandler.testOnlyContext(), flagsDb);
@Test
public void testV1() {
String expectedResponse = "{" +
Stream.of("data", "defined")
.map(name -> "\"" + name + "\":{\"url\":\"https:
.collect(Collectors.joining(",")) +
"}";
verifySuccessfulRequest(Method.GET, "", "", expectedResponse);
verifySuccessfulRequest(Method.GET, "/", "", expectedResponse);
}
@Test
public void testDefined() {
try (Flags.Replacer replacer = Flags.clearFlagsForTesting()) {
fixUnusedWarning(replacer);
Flags.defineBoolean("id", false, "desc", "mod", FetchVector.Dimension.HOSTNAME);
verifySuccessfulRequest(Method.GET, "/defined", "",
"{\"id\":{\"description\":\"desc\",\"modification-effect\":\"mod\",\"dimensions\":[\"hostname\"]}}");
}
}
private void fixUnusedWarning(Flags.Replacer replacer) { }
@Test
@Test
public void testForcing() {
FlagId undefinedFlagId = new FlagId("undef");
HttpResponse response = handle(Method.PUT, "/data/" + undefinedFlagId, "");
assertEquals(404, response.getStatus());
assertEquals("application/json", response.getContentType());
}
private void verifySuccessfulRequest(Method method, String pathSuffix, String requestBody, String expectedResponseBody) {
HttpResponse response = handle(method, pathSuffix, requestBody);
assertEquals(200, response.getStatus());
assertEquals("application/json", response.getContentType());
String actualResponse = uncheck(() -> SessionHandlerTest.getRenderedString(response));
assertThat(actualResponse, is(expectedResponseBody));
}
private HttpResponse handle(Method method, String pathSuffix, String requestBody) {
String uri = FLAGS_V1_URL + pathSuffix;
HttpRequest request = HttpRequest.createTestRequest(uri, method, makeInputStream(requestBody));
return handler.handle(request);
}
private String makeUrl(String component) {
return FLAGS_V1_URL + "/" + component;
}
private InputStream makeInputStream(String content) {
return new ByteArrayInputStream(Utf8.toBytes(content));
}
} | class FlagsHandlerTest {
private static final UnboundFlag<Boolean> FLAG1 =
Flags.defineBoolean("id1", false, "desc1", "mod1");
private static final UnboundFlag<Boolean> FLAG2 =
Flags.defineBoolean("id2", true, "desc2", "mod2",
FetchVector.Dimension.HOSTNAME, FetchVector.Dimension.APPLICATION_ID);
private static final String FLAGS_V1_URL = "https:
private final FlagsDbImpl flagsDb = new FlagsDbImpl(new MockCurator());
private final FlagsHandler handler = new FlagsHandler(FlagsHandler.testOnlyContext(), flagsDb);
@Test
public void testV1() {
String expectedResponse = "{" +
Stream.of("data", "defined")
.map(name -> "\"" + name + "\":{\"url\":\"https:
.collect(Collectors.joining(",")) +
"}";
verifySuccessfulRequest(Method.GET, "", "", expectedResponse);
verifySuccessfulRequest(Method.GET, "/", "", expectedResponse);
}
@Test
public void testDefined() {
try (Flags.Replacer replacer = Flags.clearFlagsForTesting()) {
fixUnusedWarning(replacer);
Flags.defineBoolean("id", false, "desc", "mod", FetchVector.Dimension.HOSTNAME);
verifySuccessfulRequest(Method.GET, "/defined", "",
"{\"id\":{\"description\":\"desc\",\"modification-effect\":\"mod\",\"dimensions\":[\"hostname\"]}}");
}
}
private void fixUnusedWarning(Flags.Replacer replacer) { }
@Test
@Test
public void testForcing() {
FlagId undefinedFlagId = new FlagId("undef");
HttpResponse response = handle(Method.PUT, "/data/" + undefinedFlagId, "");
assertEquals(404, response.getStatus());
assertEquals("application/json", response.getContentType());
}
private void verifySuccessfulRequest(Method method, String pathSuffix, String requestBody, String expectedResponseBody) {
HttpResponse response = handle(method, pathSuffix, requestBody);
assertEquals(200, response.getStatus());
assertEquals("application/json", response.getContentType());
String actualResponse = uncheck(() -> SessionHandlerTest.getRenderedString(response));
assertThat(actualResponse, is(expectedResponseBody));
}
private HttpResponse handle(Method method, String pathSuffix, String requestBody) {
String uri = FLAGS_V1_URL + pathSuffix;
HttpRequest request = HttpRequest.createTestRequest(uri, method, makeInputStream(requestBody));
return handler.handle(request);
}
private String makeUrl(String component) {
return FLAGS_V1_URL + "/" + component;
}
private InputStream makeInputStream(String content) {
return new ByteArrayInputStream(Utf8.toBytes(content));
}
} |
Use `EnumMap`? | public static FetchVector fromMap(Map<Dimension, String> map) {
return new FetchVector(new HashMap<>(map));
} | return new FetchVector(new HashMap<>(map)); | public static FetchVector fromMap(Map<Dimension, String> map) {
return new FetchVector(new HashMap<>(map));
} | class FetchVector {
public enum Dimension {
/** Value from ZoneId::value */
ZONE_ID,
/** Value from ApplicationId::serializedForm */
APPLICATION_ID,
/** Fully qualified hostname */
HOSTNAME
}
private final Map<Dimension, String> map;
public FetchVector() {
this.map = Collections.emptyMap();
}
private FetchVector(Map<Dimension, String> map) {
this.map = Collections.unmodifiableMap(map);
}
public Optional<String> getValue(Dimension dimension) {
return Optional.ofNullable(map.get(dimension));
}
public Map<Dimension, String> toMap() {
return map;
}
/** Returns a new FetchVector, identical to {@code this} except for its value in {@code dimension}. */
public FetchVector with(Dimension dimension, String value) {
return makeFetchVector(merged -> merged.put(dimension, value));
}
/** Returns a new FetchVector, identical to {@code this} except for its values in the override's dimensions. */
public FetchVector with(FetchVector override) {
return makeFetchVector(vector -> vector.putAll(override.map));
}
private FetchVector makeFetchVector(Consumer<EnumMap<Dimension, String>> mapModifier) {
EnumMap<Dimension, String> mergedMap = new EnumMap<>(Dimension.class);
mergedMap.putAll(map);
mapModifier.accept(mergedMap);
return new FetchVector(mergedMap);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FetchVector that = (FetchVector) o;
return Objects.equals(map, that.map);
}
@Override
public int hashCode() {
return Objects.hash(map);
}
} | class FetchVector {
public enum Dimension {
/** Value from ZoneId::value */
ZONE_ID,
/** Value from ApplicationId::serializedForm */
APPLICATION_ID,
/** Fully qualified hostname */
HOSTNAME
}
private final Map<Dimension, String> map;
public FetchVector() {
this.map = Collections.emptyMap();
}
private FetchVector(Map<Dimension, String> map) {
this.map = Collections.unmodifiableMap(map);
}
public Optional<String> getValue(Dimension dimension) {
return Optional.ofNullable(map.get(dimension));
}
public Map<Dimension, String> toMap() {
return map;
}
/** Returns a new FetchVector, identical to {@code this} except for its value in {@code dimension}. */
public FetchVector with(Dimension dimension, String value) {
return makeFetchVector(merged -> merged.put(dimension, value));
}
/** Returns a new FetchVector, identical to {@code this} except for its values in the override's dimensions. */
public FetchVector with(FetchVector override) {
return makeFetchVector(vector -> vector.putAll(override.map));
}
private FetchVector makeFetchVector(Consumer<EnumMap<Dimension, String>> mapModifier) {
EnumMap<Dimension, String> mergedMap = new EnumMap<>(Dimension.class);
mergedMap.putAll(map);
mapModifier.accept(mergedMap);
return new FetchVector(mergedMap);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FetchVector that = (FetchVector) o;
return Objects.equals(map, that.map);
}
@Override
public int hashCode() {
return Objects.hash(map);
}
} |
Fixed | public void test() {
MockCurator curator = new MockCurator();
FlagsDbImpl db = new FlagsDbImpl(curator);
Condition condition1 = new Condition(Condition.Type.WHITELIST, FetchVector.Dimension.HOSTNAME, "host1");
Rule rule1 = new Rule(Optional.of(JsonNodeRawFlag.fromJson("13")), condition1);
FlagData data = new FlagData(new FetchVector().with(FetchVector.Dimension.ZONE_ID, "zone-a"), rule1);
FlagId flagId = new FlagId("id");
db.setValue(flagId, data);
assertTrue(db.getValue(flagId).isPresent());
Optional<FlagData> dataCopy = db.getValue(flagId);
assertTrue(dataCopy.isPresent());
assertEquals("{\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\"," +
"\"values\":[\"host1\"]}],\"value\":13}],\"attributes\":{\"zone\":\"zone-a\"}}",
dataCopy.get().serializeToJson());
FlagId flagId2 = new FlagId("id2");
db.setValue(flagId2, data);
Map<FlagId, FlagData> flags = db.getAllFlags();
assertThat(flags.size(), equalTo(2));
assertThat(flags.get(flagId), notNullValue());
assertThat(flags.get(flagId2), notNullValue());
db.removeValue(flagId2);
assertFalse(db.getValue(flagId2).isPresent());
} | assertTrue(db.getValue(flagId).isPresent()); | public void test() {
MockCurator curator = new MockCurator();
FlagsDbImpl db = new FlagsDbImpl(curator);
Condition condition1 = new Condition(Condition.Type.WHITELIST, FetchVector.Dimension.HOSTNAME, "host1");
Rule rule1 = new Rule(Optional.of(JsonNodeRawFlag.fromJson("13")), condition1);
FlagId flagId = new FlagId("id");
FlagData data = new FlagData(flagId, new FetchVector().with(FetchVector.Dimension.ZONE_ID, "zone-a"), rule1);
db.setValue(flagId, data);
Optional<FlagData> dataCopy = db.getValue(flagId);
assertTrue(dataCopy.isPresent());
assertEquals("{\"id\":\"id\",\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\"," +
"\"values\":[\"host1\"]}],\"value\":13}],\"attributes\":{\"zone\":\"zone-a\"}}",
dataCopy.get().serializeToJson());
FlagId flagId2 = new FlagId("id2");
db.setValue(flagId2, data);
Map<FlagId, FlagData> flags = db.getAllFlags();
assertThat(flags.size(), equalTo(2));
assertThat(flags.get(flagId), notNullValue());
assertThat(flags.get(flagId2), notNullValue());
db.removeValue(flagId2);
assertFalse(db.getValue(flagId2).isPresent());
} | class FlagsDbImplTest {
@Test
} | class FlagsDbImplTest {
@Test
} |
Fixed | public String getContentType() {
return "application/json";
} | return "application/json"; | public String getContentType() {
return HttpConfigResponse.JSON_CONTENT_TYPE;
} | class FlagDataListResponse extends HttpResponse {
private static ObjectMapper mapper = new ObjectMapper();
private final String flagsV1Uri;
private final Map<FlagId, FlagData> flags;
private final boolean showDataInsteadOfUrl;
public FlagDataListResponse(String flagsV1Uri, Map<FlagId, FlagData> flags, boolean showDataInsteadOfUrl) {
super(Response.Status.OK);
this.flagsV1Uri = flagsV1Uri;
this.flags = flags;
this.showDataInsteadOfUrl = showDataInsteadOfUrl;
}
@Override
public void render(OutputStream outputStream) {
ObjectNode rootNode = mapper.createObjectNode();
new TreeMap<>(flags).forEach((flagId, flagData) -> {
if (showDataInsteadOfUrl) {
rootNode.set(flagId.toString(), flagData.toJsonNode());
} else {
rootNode.putObject(flagId.toString()).put("url", flagsV1Uri + "/data/" + flagId.toString());
}
});
uncheck(() -> mapper.writeValue(outputStream, rootNode));
}
@Override
} | class FlagDataListResponse extends HttpResponse {
private static ObjectMapper mapper = new ObjectMapper();
private final String flagsV1Uri;
private final Map<FlagId, FlagData> flags;
private final boolean recursive;
public FlagDataListResponse(String flagsV1Uri, Map<FlagId, FlagData> flags, boolean recursive) {
super(Response.Status.OK);
this.flagsV1Uri = flagsV1Uri;
this.flags = flags;
this.recursive = recursive;
}
@Override
public void render(OutputStream outputStream) {
ObjectNode rootNode = mapper.createObjectNode();
ArrayNode flagsArray = rootNode.putArray("flags");
new TreeMap<>(this.flags).forEach((flagId, flagData) -> {
if (recursive) {
flagsArray.add(flagData.toJsonNode());
} else {
ObjectNode object = flagsArray.addObject();
object.put("id", flagId.toString());
object.put("url", flagsV1Uri + "/data/" + flagId.toString());
}
});
uncheck(() -> mapper.writeValue(outputStream, rootNode));
}
@Override
} |
what was put may not be identical to what you get (due to stray fields, ordering, etc), so I thought it was nice to return the actually stored data? Let me remove it... | private HttpResponse putFlagData(HttpRequest request, FlagId flagId) {
flagsDb.setValue(flagId, FlagData.deserialize(request.getData()));
return getFlagData(flagId);
} | return getFlagData(flagId); | private HttpResponse putFlagData(HttpRequest request, FlagId flagId) {
flagsDb.setValue(flagId, FlagData.deserialize(request.getData()));
return new OKResponse();
} | class FlagsHandler extends HttpHandler {
private final FlagsDb flagsDb;
@Inject
public FlagsHandler(LoggingRequestHandler.Context context, FlagsDb flagsDb) {
super(context);
this.flagsDb = flagsDb;
}
@Override
protected HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1")) return new V1Response(flagsV1Uri(request), "data", "defined");
if (path.matches("/flags/v1/data")) return getFlagDataList(request);
if (path.matches("/flags/v1/data/{flagId}")) return getFlagData(findFlagId(request, path));
if (path.matches("/flags/v1/defined")) return getDefinedFlagList(request);
throw new NotFoundException("Nothing at path '" + path + "'");
}
@Override
protected HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1/data/{flagId}")) return putFlagData(request, findFlagId(request, path));
throw new NotFoundException("Nothing at path '" + path + "'");
}
@Override
protected HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1/data/{flagId}")) return deleteFlagData(findFlagId(request, path));
throw new NotFoundException("Nothing at path '" + path + "'");
}
private HttpResponse getDefinedFlagList(HttpRequest request) {
return new DefinedFlags(Flags.getAllFlags());
}
private String flagsV1Uri(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
private HttpResponse getFlagDataList(HttpRequest request) {
return new FlagDataListResponse(flagsV1Uri(request), flagsDb.getAllFlags(),
Objects.equals(request.getProperty("recursive"), "true"));
}
private HttpResponse getFlagData(FlagId flagId) {
FlagData data = flagsDb.getValue(flagId).orElseThrow(() -> new NotFoundException("Flag " + flagId + " not set"));
return new FlagDataResponse(data);
}
private HttpResponse deleteFlagData(FlagId flagId) {
flagsDb.removeValue(flagId);
return new OKResponse();
}
private FlagId findFlagId(HttpRequest request, Path path) {
FlagId flagId = new FlagId(path.get("flagId"));
if (!Objects.equals(request.getProperty("force"), "true")) {
if (Flags.getAllFlags().stream().noneMatch(definition -> flagId.equals(definition.getUnboundFlag().id()))) {
throw new NotFoundException("There is no flag '" + flagId + "' (use ?force=true to override)");
}
}
return flagId;
}
} | class FlagsHandler extends HttpHandler {
private final FlagsDb flagsDb;
@Inject
public FlagsHandler(LoggingRequestHandler.Context context, FlagsDb flagsDb) {
super(context);
this.flagsDb = flagsDb;
}
@Override
protected HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1")) return new V1Response(flagsV1Uri(request), "data", "defined");
if (path.matches("/flags/v1/data")) return getFlagDataList(request);
if (path.matches("/flags/v1/data/{flagId}")) return getFlagData(findFlagId(request, path));
if (path.matches("/flags/v1/defined")) return getDefinedFlagList(request);
throw new NotFoundException("Nothing at path '" + path + "'");
}
@Override
protected HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1/data/{flagId}")) return putFlagData(request, findFlagId(request, path));
throw new NotFoundException("Nothing at path '" + path + "'");
}
@Override
protected HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/flags/v1/data/{flagId}")) return deleteFlagData(findFlagId(request, path));
throw new NotFoundException("Nothing at path '" + path + "'");
}
private HttpResponse getDefinedFlagList(HttpRequest request) {
return new DefinedFlags(Flags.getAllFlags());
}
private String flagsV1Uri(HttpRequest request) {
URI uri = request.getUri();
return uri.getScheme() + ":
}
private HttpResponse getFlagDataList(HttpRequest request) {
return new FlagDataListResponse(flagsV1Uri(request), flagsDb.getAllFlags(),
Objects.equals(request.getProperty("recursive"), "true"));
}
private HttpResponse getFlagData(FlagId flagId) {
FlagData data = flagsDb.getValue(flagId).orElseThrow(() -> new NotFoundException("Flag " + flagId + " not set"));
return new FlagDataResponse(data);
}
private HttpResponse deleteFlagData(FlagId flagId) {
flagsDb.removeValue(flagId);
return new OKResponse();
}
private FlagId findFlagId(HttpRequest request, Path path) {
FlagId flagId = new FlagId(path.get("flagId"));
if (!Objects.equals(request.getProperty("force"), "true")) {
if (Flags.getAllFlags().stream().noneMatch(definition -> flagId.equals(definition.getUnboundFlag().id()))) {
throw new NotFoundException("There is no flag '" + flagId + "' (use ?force=true to override)");
}
}
return flagId;
}
} |
Unfortunately the EnumMap has a counterintuitive constructor taking a Map: If no element is found in the map, an IllegalArgumentException is thrown (because it's unable to get the Class without any instances), and this is detected at run-time which is uncomfortable. Just seems simpler to use Map. | public static FetchVector fromMap(Map<Dimension, String> map) {
return new FetchVector(new HashMap<>(map));
} | return new FetchVector(new HashMap<>(map)); | public static FetchVector fromMap(Map<Dimension, String> map) {
return new FetchVector(new HashMap<>(map));
} | class FetchVector {
public enum Dimension {
/** Value from ZoneId::value */
ZONE_ID,
/** Value from ApplicationId::serializedForm */
APPLICATION_ID,
/** Fully qualified hostname */
HOSTNAME
}
private final Map<Dimension, String> map;
public FetchVector() {
this.map = Collections.emptyMap();
}
private FetchVector(Map<Dimension, String> map) {
this.map = Collections.unmodifiableMap(map);
}
public Optional<String> getValue(Dimension dimension) {
return Optional.ofNullable(map.get(dimension));
}
public Map<Dimension, String> toMap() {
return map;
}
/** Returns a new FetchVector, identical to {@code this} except for its value in {@code dimension}. */
public FetchVector with(Dimension dimension, String value) {
return makeFetchVector(merged -> merged.put(dimension, value));
}
/** Returns a new FetchVector, identical to {@code this} except for its values in the override's dimensions. */
public FetchVector with(FetchVector override) {
return makeFetchVector(vector -> vector.putAll(override.map));
}
private FetchVector makeFetchVector(Consumer<EnumMap<Dimension, String>> mapModifier) {
EnumMap<Dimension, String> mergedMap = new EnumMap<>(Dimension.class);
mergedMap.putAll(map);
mapModifier.accept(mergedMap);
return new FetchVector(mergedMap);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FetchVector that = (FetchVector) o;
return Objects.equals(map, that.map);
}
@Override
public int hashCode() {
return Objects.hash(map);
}
} | class FetchVector {
public enum Dimension {
/** Value from ZoneId::value */
ZONE_ID,
/** Value from ApplicationId::serializedForm */
APPLICATION_ID,
/** Fully qualified hostname */
HOSTNAME
}
private final Map<Dimension, String> map;
public FetchVector() {
this.map = Collections.emptyMap();
}
private FetchVector(Map<Dimension, String> map) {
this.map = Collections.unmodifiableMap(map);
}
public Optional<String> getValue(Dimension dimension) {
return Optional.ofNullable(map.get(dimension));
}
public Map<Dimension, String> toMap() {
return map;
}
/** Returns a new FetchVector, identical to {@code this} except for its value in {@code dimension}. */
public FetchVector with(Dimension dimension, String value) {
return makeFetchVector(merged -> merged.put(dimension, value));
}
/** Returns a new FetchVector, identical to {@code this} except for its values in the override's dimensions. */
public FetchVector with(FetchVector override) {
return makeFetchVector(vector -> vector.putAll(override.map));
}
private FetchVector makeFetchVector(Consumer<EnumMap<Dimension, String>> mapModifier) {
EnumMap<Dimension, String> mergedMap = new EnumMap<>(Dimension.class);
mergedMap.putAll(map);
mapModifier.accept(mergedMap);
return new FetchVector(mergedMap);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FetchVector that = (FetchVector) o;
return Objects.equals(map, that.map);
}
@Override
public int hashCode() {
return Objects.hash(map);
}
} |
Done | public void testData() {
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
"{\n" +
" \"rules\": [\n" +
" {\n" +
" \"value\": true\n" +
" }\n" +
" ]\n" +
"}",
"{\"rules\":[{\"value\":true}]}");
verifySuccessfulRequest(Method.GET, "/data/" + FLAG1.id(),
"", "{\"rules\":[{\"value\":true}]}");
verifySuccessfulRequest(Method.GET, "/data",
"", "{\"id1\":{\"url\":\"https:
verifySuccessfulRequest(Method.GET, "/data/",
"", "{\"id1\":{\"url\":\"https:
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG2.id(),
"{\n" +
" \"rules\": [\n" +
" {\n" +
" \"conditions\": [\n" +
" {\n" +
" \"type\": \"whitelist\",\n" +
" \"dimension\": \"hostname\",\n" +
" \"values\": [ \"host1\", \"host2\" ]\n" +
" },\n" +
" {\n" +
" \"type\": \"blacklist\",\n" +
" \"dimension\": \"application\",\n" +
" \"values\": [ \"app1\", \"app2\" ]\n" +
" }\n" +
" ],\n" +
" \"value\": true\n" +
" }\n" +
" ],\n" +
" \"attributes\": {\n" +
" \"zone\": \"zone1\"\n" +
" }\n" +
"}\n",
"{\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"host1\",\"host2\"]},{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"app2\",\"app1\"]}],\"value\":true}],\"attributes\":{\"zone\":\"zone1\"}}");
verifySuccessfulRequest(Method.GET, "/data",
"",
"{\"id1\":{\"url\":\"https:
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
"{\n" +
" \"rules\": [\n" +
" {\n" +
" \"value\": false\n" +
" }\n" +
" ]\n" +
"}\n",
"{\"rules\":[{\"value\":false}]}");
verifySuccessfulRequest(Method.GET, "/data?recursive=true", "",
"{\"id1\":{\"rules\":[{\"value\":false}]},\"id2\":{\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"host1\",\"host2\"]},{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"app2\",\"app1\"]}],\"value\":true}],\"attributes\":{\"zone\":\"zone1\"}}}");
verifySuccessfulRequest(Method.DELETE, "/data/" + FLAG1.id(), "", "");
verifySuccessfulRequest(Method.DELETE, "/data/" + FLAG2.id(), "", "");
verifySuccessfulRequest(Method.GET, "/data", "", "{}");
} | "", "{\"rules\":[{\"value\":true}]}"); | public void testData() {
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
"{\n" +
" \"id\": \"id1\",\n" +
" \"rules\": [\n" +
" {\n" +
" \"value\": true\n" +
" }\n" +
" ]\n" +
"}",
"");
verifySuccessfulRequest(Method.GET, "/data/" + FLAG1.id(),
"", "{\"id\":\"id1\",\"rules\":[{\"value\":true}]}");
verifySuccessfulRequest(Method.GET, "/data",
"", "{\"flags\":[{\"id\":\"id1\",\"url\":\"https:
verifySuccessfulRequest(Method.GET, "/data/",
"", "{\"flags\":[{\"id\":\"id1\",\"url\":\"https:
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG2.id(),
"{\n" +
" \"id\": \"id2\",\n" +
" \"rules\": [\n" +
" {\n" +
" \"conditions\": [\n" +
" {\n" +
" \"type\": \"whitelist\",\n" +
" \"dimension\": \"hostname\",\n" +
" \"values\": [ \"host1\", \"host2\" ]\n" +
" },\n" +
" {\n" +
" \"type\": \"blacklist\",\n" +
" \"dimension\": \"application\",\n" +
" \"values\": [ \"app1\", \"app2\" ]\n" +
" }\n" +
" ],\n" +
" \"value\": true\n" +
" }\n" +
" ],\n" +
" \"attributes\": {\n" +
" \"zone\": \"zone1\"\n" +
" }\n" +
"}\n",
"");
verifySuccessfulRequest(Method.GET, "/data/" + FLAG2.id(), "",
"{\"id\":\"id2\",\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"host1\",\"host2\"]},{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"app2\",\"app1\"]}],\"value\":true}],\"attributes\":{\"zone\":\"zone1\"}}");
verifySuccessfulRequest(Method.GET, "/data",
"",
"{\"flags\":[{\"id\":\"id1\",\"url\":\"https:
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
"{\n" +
" \"id\": \"id1\",\n" +
" \"rules\": [\n" +
" {\n" +
" \"value\": false\n" +
" }\n" +
" ]\n" +
"}\n",
"");
verifySuccessfulRequest(Method.GET, "/data/" + FLAG1.id(), "", "{\"id\":\"id1\",\"rules\":[{\"value\":false}]}");
verifySuccessfulRequest(Method.GET, "/data?recursive=true", "",
"{\"flags\":[{\"id\":\"id1\",\"rules\":[{\"value\":false}]},{\"id\":\"id2\",\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"hostname\",\"values\":[\"host1\",\"host2\"]},{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"app2\",\"app1\"]}],\"value\":true}],\"attributes\":{\"zone\":\"zone1\"}}]}");
verifySuccessfulRequest(Method.DELETE, "/data/" + FLAG1.id(), "", "");
verifySuccessfulRequest(Method.DELETE, "/data/" + FLAG2.id(), "", "");
verifySuccessfulRequest(Method.GET, "/data", "", "{\"flags\":[]}");
} | class FlagsHandlerTest {
private static final UnboundFlag<Boolean> FLAG1 =
Flags.defineBoolean("id1", false, "desc1", "mod1");
private static final UnboundFlag<Boolean> FLAG2 =
Flags.defineBoolean("id2", true, "desc2", "mod2",
FetchVector.Dimension.HOSTNAME, FetchVector.Dimension.APPLICATION_ID);
private static final String FLAGS_V1_URL = "https:
private final FlagsDbImpl flagsDb = new FlagsDbImpl(new MockCurator());
private final FlagsHandler handler = new FlagsHandler(FlagsHandler.testOnlyContext(), flagsDb);
@Test
public void testV1() {
String expectedResponse = "{" +
Stream.of("data", "defined")
.map(name -> "\"" + name + "\":{\"url\":\"https:
.collect(Collectors.joining(",")) +
"}";
verifySuccessfulRequest(Method.GET, "", "", expectedResponse);
verifySuccessfulRequest(Method.GET, "/", "", expectedResponse);
}
@Test
public void testDefined() {
try (Flags.Replacer replacer = Flags.clearFlagsForTesting()) {
fixUnusedWarning(replacer);
Flags.defineBoolean("id", false, "desc", "mod", FetchVector.Dimension.HOSTNAME);
verifySuccessfulRequest(Method.GET, "/defined", "",
"{\"id\":{\"description\":\"desc\",\"modification-effect\":\"mod\",\"dimensions\":[\"hostname\"]}}");
}
}
private void fixUnusedWarning(Flags.Replacer replacer) { }
@Test
@Test
public void testForcing() {
FlagId undefinedFlagId = new FlagId("undef");
HttpResponse response = handle(Method.PUT, "/data/" + undefinedFlagId, "");
assertEquals(404, response.getStatus());
assertEquals("application/json", response.getContentType());
}
private void verifySuccessfulRequest(Method method, String pathSuffix, String requestBody, String expectedResponseBody) {
HttpResponse response = handle(method, pathSuffix, requestBody);
assertEquals(200, response.getStatus());
assertEquals("application/json", response.getContentType());
String actualResponse = uncheck(() -> SessionHandlerTest.getRenderedString(response));
assertThat(actualResponse, is(expectedResponseBody));
}
private HttpResponse handle(Method method, String pathSuffix, String requestBody) {
String uri = FLAGS_V1_URL + pathSuffix;
HttpRequest request = HttpRequest.createTestRequest(uri, method, makeInputStream(requestBody));
return handler.handle(request);
}
private String makeUrl(String component) {
return FLAGS_V1_URL + "/" + component;
}
private InputStream makeInputStream(String content) {
return new ByteArrayInputStream(Utf8.toBytes(content));
}
} | class FlagsHandlerTest {
private static final UnboundFlag<Boolean> FLAG1 =
Flags.defineBoolean("id1", false, "desc1", "mod1");
private static final UnboundFlag<Boolean> FLAG2 =
Flags.defineBoolean("id2", true, "desc2", "mod2",
FetchVector.Dimension.HOSTNAME, FetchVector.Dimension.APPLICATION_ID);
private static final String FLAGS_V1_URL = "https:
private final FlagsDbImpl flagsDb = new FlagsDbImpl(new MockCurator());
private final FlagsHandler handler = new FlagsHandler(FlagsHandler.testOnlyContext(), flagsDb);
@Test
public void testV1() {
String expectedResponse = "{" +
Stream.of("data", "defined")
.map(name -> "\"" + name + "\":{\"url\":\"https:
.collect(Collectors.joining(",")) +
"}";
verifySuccessfulRequest(Method.GET, "", "", expectedResponse);
verifySuccessfulRequest(Method.GET, "/", "", expectedResponse);
}
@Test
public void testDefined() {
try (Flags.Replacer replacer = Flags.clearFlagsForTesting()) {
fixUnusedWarning(replacer);
Flags.defineBoolean("id", false, "desc", "mod", FetchVector.Dimension.HOSTNAME);
verifySuccessfulRequest(Method.GET, "/defined", "",
"{\"id\":{\"description\":\"desc\",\"modification-effect\":\"mod\",\"dimensions\":[\"hostname\"]}}");
}
}
private void fixUnusedWarning(Flags.Replacer replacer) { }
@Test
@Test
public void testForcing() {
FlagId undefinedFlagId = new FlagId("undef");
HttpResponse response = handle(Method.PUT, "/data/" + undefinedFlagId, "");
assertEquals(404, response.getStatus());
assertEquals("application/json", response.getContentType());
}
private void verifySuccessfulRequest(Method method, String pathSuffix, String requestBody, String expectedResponseBody) {
HttpResponse response = handle(method, pathSuffix, requestBody);
assertEquals(200, response.getStatus());
assertEquals("application/json", response.getContentType());
String actualResponse = uncheck(() -> SessionHandlerTest.getRenderedString(response));
assertThat(actualResponse, is(expectedResponseBody));
}
private HttpResponse handle(Method method, String pathSuffix, String requestBody) {
String uri = FLAGS_V1_URL + pathSuffix;
HttpRequest request = HttpRequest.createTestRequest(uri, method, makeInputStream(requestBody));
return handler.handle(request);
}
private String makeUrl(String component) {
return FLAGS_V1_URL + "/" + component;
}
private InputStream makeInputStream(String content) {
return new ByteArrayInputStream(Utf8.toBytes(content));
}
} |
intended? | static private String getMediaType(HttpRequest request) {
String header = request.getHeader(com.yahoo.jdisc.http.HttpHeaders.Names.CONTENT_TYPE);
if (header == null) {
return "";
}
System.err.println("header is: "+header);
int semi = header.indexOf(';');
if (semi != -1) {
header = header.substring(0, semi);
System.err.println("header main part is: "+header);
}
return com.yahoo.text.Lowercase.toLowerCase(header.trim());
} | System.err.println("header is: "+header); | static private String getMediaType(HttpRequest request) {
String header = request.getHeader(com.yahoo.jdisc.http.HttpHeaders.Names.CONTENT_TYPE);
if (header == null) {
return "";
}
int semi = header.indexOf(';');
if (semi != -1) {
header = header.substring(0, semi);
}
return com.yahoo.text.Lowercase.toLowerCase(header.trim());
} | class MeanConnections implements Callback {
@Override
public void run(Handle h, boolean firstTime) {
if (firstTime) {
metric.set(SEARCH_CONNECTIONS, 0.0d, null);
return;
}
Value v = (Value) h;
metric.set(SEARCH_CONNECTIONS, v.getMean(), null);
}
} | class MeanConnections implements Callback {
@Override
public void run(Handle h, boolean firstTime) {
    // First invocation: no samples collected yet, so report 0 connections.
    if (firstTime) {
        metric.set(SEARCH_CONNECTIONS, 0.0d, null);
        return;
    }
    // Subsequent invocations: h is the Value handle this callback was registered with.
    Value v = (Value) h;
    metric.set(SEARCH_CONNECTIONS, v.getMean(), null);
}
} |
Consider expanding to if/elseif/else. Nested ternary is a bit hard to read 🙁 | public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
List<Run> runs = new ArrayList<>(runs(id, type).values());
Optional<Run> lastCompleted = runs.isEmpty() ? Optional.empty()
: runs.size() == 1 || runs.get(runs.size() - 1).hasEnded() ? Optional.of(runs.get(runs.size() - 1))
: Optional.of(runs.get(runs.size() - 2));
return badges.historic(id, lastCompleted, runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
} | Optional<Run> lastCompleted = runs.isEmpty() ? Optional.empty() | public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
List<Run> runs = new ArrayList<>(runs(id, type).values());
Run lastCompleted = null;
if (runs.size() == 1)
lastCompleted = runs.get(0);
if (runs.size() > 1 && ! lastCompleted.hasEnded())
lastCompleted = runs.get(runs.size() - 2);
return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
} | class JobController {
private static final int historyLength = 256;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final Badges badges;
/** Creates a job controller backed by the given controller's curator, storing run logs in the given data store. */
public JobController(Controller controller, RunDataStore runDataStore, TesterCloud testerCloud) {
    this.controller = controller;
    this.curator = controller.curator();
    this.logs = new BufferedLogStore(curator, runDataStore);
    this.cloud = testerCloud;
    this.badges = new Badges(controller.zoneRegistry().badgeUrl());
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
    try (Lock __ = curator.lock(id.application(), id.type())) {
        Run run = runs(id.application(), id.type()).get(id);
        if (run == null)
            return Optional.empty();
        // Active runs are served from the live log buffer; finished runs from the long-term store.
        return active(id).isPresent()
                ? Optional.of(logs.readActive(id.application(), id.type(), after))
                : logs.readFinished(id, after);
    }
}
/** Stores the given log records for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
locked(id, __ -> {
List<LogEntry> entries = messages.stream()
.map(message -> new LogEntry(0, controller.clock().millis(), LogEntry.typeOf(level), message))
.collect(toList());
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log record for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
    locked(id, run -> {
        // Only poll while the run is actually waiting for tests to finish.
        if ( ! run.readySteps().contains(endTests))
            return run;
        // The tester may not yet be reachable through the routing layer.
        Optional<URI> testerEndpoint = testerEndpoint(id);
        if ( ! testerEndpoint.isPresent())
            return run;
        List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
        if (entries.isEmpty())
            return run;
        logs.append(id.application(), id.type(), endTests, entries);
        // Remember the highest entry id seen, so the next poll continues from there.
        return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
    });
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.putIfAbsent(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.filter(Optional::isPresent).map(Optional::get)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
    locked(id, run -> {
        Run finishedRun = run.finished(controller.clock().instant());
        // Nested lock on the historic-runs list: record this run, then prune entries
        // (and their logs) that have fallen out of the history window.
        locked(id.application(), id.type(), runs -> {
            runs.put(run.id(), finishedRun);
            long last = id.number();
            Iterator<RunId> ids = runs.keySet().iterator();
            for (RunId old = ids.next(); old.number() <= last - historyLength; old = ids.next()) {
                logs.delete(old);
                ids.remove();
            }
        });
        // Move this run's buffered log entries to their permanent store.
        logs.flush(id);
        return finishedRun;
    });
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
byte[] packageBytes, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally()) {
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
}
long run = nextBuild(id);
version.set(ApplicationVersion.from(revision, run, authorEmail));
controller.applications().applicationStore().put(id,
version.get(),
packageBytes);
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
prunePackages(id);
controller.applications().storeWithUpdatedConfig(application.withBuiltInternally(true), new ApplicationPackage(packageBytes));
controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
});
return version.get();
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data, packages and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
    Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
    // Clean up data for applications which have job data but are no longer built here.
    curator.applicationsWithJobs().stream()
            .filter(id -> ! applicationsToBuild.contains(id))
            .forEach(id -> {
                try {
                    TesterId tester = TesterId.of(id);
                    for (JobType type : jobs(id))
                        locked(id, type, deactivateTester, __ -> {
                            try (Lock ___ = curator.lock(id, type)) {
                                deactivateTester(tester, type);
                                curator.deleteRunData(id, type);
                                logs.delete(id);
                            }
                        });
                }
                catch (TimeoutException e) {
                    // Locks are contended; skip this application for now and let the next
                    // maintenance run retry. NOTE: this return only exits the lambda for this id,
                    // acting as a "continue" — it does not abort collectGarbage().
                    return;
                }
                curator.deleteRunData(id);
            });
}
public void deactivateTester(TesterId id, JobType type) {
try {
controller.configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NoInstanceException ignored) {
}
}
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
// Removed an orphaned javadoc about "historic status of given length for the given job type"
// which described a historicBadge method not present in this class.
public URI overviewBadge(ApplicationId id) {
    DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
    return badges.overview(id,
                           steps.jobs().stream()
                                .map(type -> last(id, type))
                                .filter(Optional::isPresent).map(Optional::get)
                                .collect(toList()));
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
Optional<URI> testerEndpoint(RunId id) {
ApplicationId tester = id.tester().id();
return controller.applications().getDeploymentEndpoints(new DeploymentId(tester, id.type().zone(controller.system())))
.flatMap(uris -> uris.stream().findAny());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void prunePackages(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id, oldestDeployed);
controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
});
});
}
/** Locks and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
private void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
    try (Lock lock = curator.lock(id, type, step)) {
        // Briefly acquire and release each prerequisite's lock: succeeding proves no
        // prerequisite step is currently running. The empty try body is intentional.
        for (Step prerequisite : step.prerequisites())
            try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
        action.accept(new LockedStep(lock, step));
    }
}
} | class JobController {
private static final int historyLength = 256;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final Badges badges;
public JobController(Controller controller, RunDataStore runDataStore, TesterCloud testerCloud) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, runDataStore);
this.cloud = testerCloud;
this.badges = new Badges(controller.zoneRegistry().badgeUrl());
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log records for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
locked(id, __ -> {
List<LogEntry> entries = messages.stream()
.map(message -> new LogEntry(0, controller.clock().millis(), LogEntry.typeOf(level), message))
.collect(toList());
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log record for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.putIfAbsent(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.filter(Optional::isPresent).map(Optional::get)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
Iterator<RunId> ids = runs.keySet().iterator();
for (RunId old = ids.next(); old.number() <= last - historyLength; old = ids.next()) {
logs.delete(old);
ids.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
byte[] packageBytes, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally()) {
application.get().deployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
}
long run = nextBuild(id);
version.set(ApplicationVersion.from(revision, run, authorEmail));
controller.applications().applicationStore().put(id,
version.get(),
packageBytes);
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
prunePackages(id);
controller.applications().storeWithUpdatedConfig(application.withBuiltInternally(true), new ApplicationPackage(packageBytes));
controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
});
return version.get();
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data, packages and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
}
catch (TimeoutException e) {
return;
}
curator.deleteRunData(id);
});
}
public void deactivateTester(TesterId id, JobType type) {
try {
controller.configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NoInstanceException ignored) {
}
}
/** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
return badges.overview(id,
steps.jobs().stream()
.map(type -> last(id, type))
.filter(Optional::isPresent).map(Optional::get)
.collect(toList()));
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
Optional<URI> testerEndpoint(RunId id) {
ApplicationId tester = id.tester().id();
return controller.applications().getDeploymentEndpoints(new DeploymentId(tester, id.type().zone(controller.system())))
.flatMap(uris -> uris.stream().findAny());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void prunePackages(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id, oldestDeployed);
controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
});
});
}
/** Locks and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
private void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} |
My comment got somehow lost due to the force-push: Consider using `node.type().isDockerHost()` for parent check, it also handles infrastructure hosts and wont give false positives like BM tenant nodes. | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
List<Node> nodes = nodeRepository().getNodes();
NodeList recentlyFailedNodes = nodes.stream()
.filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
.collect(collectingAndThen(Collectors.toList(), NodeList::new));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
if (!node.parentHostname().isPresent() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(nodes.size())));
return true;
} | if (!node.parentHostname().isPresent() && | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
List<Node> nodes = nodeRepository().getNodes();
NodeList recentlyFailedNodes = nodes.stream()
.filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
.collect(collectingAndThen(Collectors.toList(), NodeList::new));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
if (!node.parentHostname().isPresent() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(nodes.size())));
return true;
} | class NodeFailer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
public static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
public static final String throttlingActiveMetric = "nodeFailThrottling";
/** Provides information about the status of ready hosts */
private final HostLivenessTracker hostLivenessTracker;
/** Provides (more accurate) information about the status of active hosts */
private final ServiceMonitor serviceMonitor;
private final Deployer deployer;
private final Duration downTimeLimit;
private final Clock clock;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
private final ConfigserverConfig configserverConfig;
public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric,
JobControl jobControl,
ConfigserverConfig configserverConfig) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
this.deployer = deployer;
this.hostLivenessTracker = hostLivenessTracker;
this.serviceMonitor = serviceMonitor;
this.downTimeLimit = downTimeLimit;
this.clock = clock;
this.orchestrator = orchestrator;
this.constructionTime = clock.instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
this.configserverConfig = configserverConfig;
}
@Override
protected void maintain() {
    int throttledNodeFailures = 0;
    // 1. Fail ready nodes. The unallocated-lock guards the ready pool while we update
    //    liveness events and fail nodes.
    try (Mutex lock = nodeRepository().lockUnallocated()) {
        updateNodeLivenessEventsForReadyNodes();
        for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
            Node node = entry.getKey();
            if (throttle(node)) {
                throttledNodeFailures++;
                continue;
            }
            String reason = entry.getValue();
            nodeRepository().fail(node.hostname(), Agent.system, reason);
        }
    }
    // 2. Fail active nodes, subject to the per-node-type policy in failAllowedFor.
    updateNodeDownState();
    for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
        Node node = entry.getKey();
        if (!failAllowedFor(node.type())) {
            continue;
        }
        if (throttle(node)) {
            throttledNodeFailures++;
            continue;
        }
        String reason = entry.getValue();
        failActive(node, reason);
    }
    // Report throttling: the gauge is 1 while any failure was suppressed this cycle, 0 otherwise.
    metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
    metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
}
private void updateNodeLivenessEventsForReadyNodes() {
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
if ( ! lastLocalRequest.isPresent()) continue;
if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
History updatedHistory = node.history()
.with(new History.Event(History.Event.Type.requested, Agent.system, lastLocalRequest.get()));
nodeRepository().write(node.with(updatedHistory));
}
}
}
/** Returns the ready nodes that should be failed, keyed on node, with a human-readable reason as value. */
private Map<Node, String> getReadyNodesByFailureReason() {
    // Shortly after (re)construction we cannot yet have observed config requests, so use
    // EPOCH as the threshold (which flags no node) instead of producing false positives.
    Instant oldestAcceptableRequestTime =
            constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
            Instant.EPOCH :
            clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
    Map<Node, String> nodesByFailureReason = new HashMap<>();
    for (Node node : nodeRepository().getNodes(Node.State.ready)) {
        if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
            nodesByFailureReason.put(node, "Not receiving config requests from node");
        } else if (node.status().hardwareFailureDescription().isPresent()) {
            nodesByFailureReason.put(node, "Node has hardware failure");
        } else if (node.status().hardwareDivergence().isPresent()) {
            nodesByFailureReason.put(node, "Node has hardware divergence");
        }
    }
    return nodesByFailureReason;
}
/**
 * If a node is considered bad (see {@link #badNode(List)}), a "down" history event is recorded for it.
 * Otherwise we remove any "down" history record.
 * (Reconstructed javadoc: the original {@code @link} was truncated mid-tag.)
 */
private void updateNodeDownState() {
    Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
            .collect(Collectors.toMap(Node::hostname, node -> node));
    serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
            .forEach((hostName, serviceInstances) -> {
                Node node = activeNodesByHostname.get(hostName.s());
                if (node == null) return; // service host is not an active node in this repository
                if (badNode(serviceInstances)) {
                    recordAsDown(node);
                } else {
                    clearDownRecord(node);
                }
            });
}
private Map<Node, String> getActiveNodesByFailureReason() {
Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.active)) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
} else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
nodesByFailureReason.put(node, "Node has hardware failure");
}
}
return nodesByFailureReason;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
} catch (HostNameNotFoundException e) {
return false;
}
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* However, nodes of other types are not replaced (because all of the type are used by a single application),
* so we only allow one to be in failed at any point in time to protect against runaway failing.
*/
private boolean failAllowedFor(NodeType nodeType) {
if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
}
/**
* Returns true if the node is considered bad: all monitored services services are down.
* If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
*/
public static boolean badNode(List<ServiceInstance> services) {
Map<ServiceStatus, Long> countsByStatus = services.stream()
.collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
}
/**
* Record a node as down if not already recorded and returns the node in the new state.
* This assumes the node is found in the node
* repo and that the node is allocated. If we get here otherwise something is truly odd.
*/
private Node recordAsDown(Node node) {
if (node.history().event(History.Event.Type.down).isPresent()) return node;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
return nodeRepository().write(node.downAt(clock.instant()));
}
}
private void clearDownRecord(Node node) {
if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
nodeRepository().write(node.up());
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if ( ! deployment.isPresent()) return false;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().fail(failingTenantNode.hostname(), Agent.system, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
node = nodeRepository().fail(node.hostname(), Agent.system, reason);
try {
deployment.get().activate();
return true;
}
catch (RuntimeException e) {
nodeRepository().reactivate(node.hostname(), Agent.system,
"Failed to redeploy after being failed by NodeFailer");
log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
", but redeploying without the node failed", e);
return false;
}
}
}
/** Returns true if node failing should be throttled */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} | class NodeFailer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
public static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
public static final String throttlingActiveMetric = "nodeFailThrottling";
/** Provides information about the status of ready hosts */
private final HostLivenessTracker hostLivenessTracker;
/** Provides (more accurate) information about the status of active hosts */
private final ServiceMonitor serviceMonitor;
private final Deployer deployer;
private final Duration downTimeLimit;
private final Clock clock;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
private final ConfigserverConfig configserverConfig;
/**
 * Creates a node failer.
 * <p>
 * The maintenance interval passed to the superclass is half the down time limit,
 * capped at 5 minutes, so that down nodes are detected well within the limit.
 *
 * @param deployer            used to redeploy applications when failing active nodes
 * @param hostLivenessTracker provides config-request liveness for ready nodes
 * @param serviceMonitor      provides service status for active nodes
 * @param downTimeLimit       how long a node may be down before it is failed
 * @param throttlePolicy      bounds how many nodes may be failed per time window
 */
public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
                  ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                  Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
                  ThrottlePolicy throttlePolicy, Metric metric,
                  JobControl jobControl,
                  ConfigserverConfig configserverConfig) {
    super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
    this.deployer = deployer;
    this.hostLivenessTracker = hostLivenessTracker;
    this.serviceMonitor = serviceMonitor;
    this.downTimeLimit = downTimeLimit;
    this.clock = clock;
    this.orchestrator = orchestrator;
    // Remember construction time: liveness checks are relaxed shortly after startup,
    // see getReadyNodesByFailureReason().
    this.constructionTime = clock.instant();
    this.throttlePolicy = throttlePolicy;
    this.metric = metric;
    this.configserverConfig = configserverConfig;
}
@Override
protected void maintain() {
    int throttledNodeFailures = 0;
    // Ready nodes: these have no application, so they are failed directly,
    // under the lock covering unallocated nodes.
    try (Mutex lock = nodeRepository().lockUnallocated()) {
        updateNodeLivenessEventsForReadyNodes();
        for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
            Node node = entry.getKey();
            if (throttle(node)) {
                throttledNodeFailures++;
                continue;
            }
            String reason = entry.getValue();
            nodeRepository().fail(node.hostname(), Agent.system, reason);
        }
    }
    // Active nodes: first refresh their down/up history from the service model,
    // then fail candidates via redeployment (failActive), subject to per-type and throttle checks.
    updateNodeDownState();
    for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
        Node node = entry.getKey();
        if (!failAllowedFor(node.type())) {
            continue;
        }
        if (throttle(node)) {
            throttledNodeFailures++;
            continue;
        }
        String reason = entry.getValue();
        failActive(node, reason);
    }
    // Gauge is 1 if any failure was throttled in this run, 0 otherwise; the second
    // metric carries the actual count of throttled failures.
    metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
    metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
}
/**
 * Ensures each ready node's history contains a "requested" event at least as recent as
 * the node's last observed config request, writing an updated history when needed.
 */
private void updateNodeLivenessEventsForReadyNodes() {
    for (Node node : nodeRepository().getNodes(Node.State.ready)) {
        Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
        if (lastLocalRequest.isPresent()) {
            Instant requestTime = lastLocalRequest.get();
            boolean alreadyRecorded = node.history().hasEventAfter(History.Event.Type.requested, requestTime);
            if (!alreadyRecorded) {
                History.Event requestedEvent =
                        new History.Event(History.Event.Type.requested, Agent.system, requestTime);
                nodeRepository().write(node.with(node.history().with(requestedEvent)));
            }
        }
    }
}
/** Returns ready nodes that should be failed, mapped to a human-readable reason. */
private Map<Node, String> getReadyNodesByFailureReason() {
    // If this failer was constructed less than two request intervals ago, accept any
    // request time (EPOCH) instead of requiring a recent config request.
    Instant oldestAcceptableRequestTime;
    if (constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2)))) {
        oldestAcceptableRequestTime = Instant.EPOCH;
    } else {
        oldestAcceptableRequestTime = clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
    }

    Map<Node, String> failureReasonsByNode = new HashMap<>();
    for (Node candidate : nodeRepository().getNodes(Node.State.ready)) {
        if (expectConfigRequests(candidate) && ! hasNodeRequestedConfigAfter(candidate, oldestAcceptableRequestTime)) {
            failureReasonsByNode.put(candidate, "Not receiving config requests from node");
        } else if (candidate.status().hardwareFailureDescription().isPresent()) {
            failureReasonsByNode.put(candidate, "Node has hardware failure");
        } else if (candidate.status().hardwareDivergence().isPresent()) {
            failureReasonsByNode.put(candidate, "Node has hardware divergence");
        }
    }
    return failureReasonsByNode;
}
/**
 * If a node is considered bad (see {@link #badNode(List)}), records a "down" event in its
 * history; otherwise removes any existing "down" history record. Only nodes that are
 * currently active and present in the service model snapshot are considered.
 */
private void updateNodeDownState() {
    // Index active nodes by hostname for constant-time lookup while walking the snapshot.
    Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
            .collect(Collectors.toMap(Node::hostname, node -> node));
    serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
            .forEach((hostName, serviceInstances) -> {
                Node node = activeNodesByHostname.get(hostName.s());
                if (node == null) return; // host is not an active node in the repo — nothing to record
                if (badNode(serviceInstances)) {
                    recordAsDown(node);
                } else {
                    clearDownRecord(node);
                }
            });
}
/** Returns active nodes that should be failed, mapped to a human-readable reason. */
private Map<Node, String> getActiveNodesByFailureReason() {
    Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
    Map<Node, String> failureReasonsByNode = new HashMap<>();
    for (Node candidate : nodeRepository().getNodes(Node.State.active)) {
        boolean downTooLong = candidate.history().hasEventBefore(History.Event.Type.down, graceTimeEnd)
                              && ! applicationSuspended(candidate);
        if (downTooLong) {
            failureReasonsByNode.put(candidate, "Node has been down longer than " + downTimeLimit);
        } else if (candidate.status().hardwareFailureDescription().isPresent() && nodeSuspended(candidate)) {
            failureReasonsByNode.put(candidate, "Node has hardware failure");
        }
    }
    return failureReasonsByNode;
}
/** Config requests are expected from every node; for Docker hosts only when node-admin runs in a container. */
private boolean expectConfigRequests(Node node) {
    if (node.type().isDockerHost()) return configserverConfig.nodeAdminInContainer();
    return true;
}
/** Returns whether the node has requested config after the given instant (trivially true for newly readied nodes). */
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
    if (wasMadeReadyBefore(node, instant)) {
        return hasRecordedRequestAfter(node, instant);
    }
    return true; // readied after the cutoff — a recent request is not yet required
}
/** Returns whether this node has a "readied" event before the given instant. */
private boolean wasMadeReadyBefore(Node node, Instant instant) {
    History history = node.history();
    return history.hasEventBefore(History.Event.Type.readied, instant);
}
/** Returns whether this node has a recorded "requested" event after the given instant. */
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
    History history = node.history();
    return history.hasEventAfter(History.Event.Type.requested, instant);
}
/** Returns whether the application owning this node is allowed to be down by the orchestrator. */
private boolean applicationSuspended(Node node) {
    try {
        return ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN ==
               orchestrator.getApplicationInstanceStatus(node.allocation().get().owner());
    } catch (ApplicationIdNotFoundException ignored) {
        return false; // the owning application is unknown to the orchestrator — treat as not suspended
    }
}
/** Returns whether this particular node is allowed to be down by the orchestrator. */
private boolean nodeSuspended(Node node) {
    try {
        return HostStatus.ALLOWED_TO_BE_DOWN ==
               orchestrator.getNodeStatus(new HostName(node.hostname()));
    } catch (HostNameNotFoundException ignored) {
        return false; // host unknown to the orchestrator — treat as not suspended
    }
}
/**
 * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
 * unless the node is replaced.
 * However, nodes of other types are not replaced (because all of the type are used by a single application),
 * so we only allow one to be in failed at any point in time to protect against runaway failing.
 *
 * @param nodeType the type of node we are about to fail
 * @return whether failing a node of this type is currently allowed
 */
private boolean failAllowedFor(NodeType nodeType) {
    if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
    // Other types: allowed only while no node of the same type is already in failed.
    return nodeRepository().getNodes(nodeType, Node.State.failed).isEmpty();
}
/**
 * Returns true if the node is considered bad: no monitored service is up while at least one is down.
 * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
 */
public static boolean badNode(List<ServiceInstance> services) {
    long upCount = services.stream()
                           .filter(service -> service.serviceStatus() == ServiceStatus.UP)
                           .count();
    long downCount = services.stream()
                             .filter(service -> service.serviceStatus() == ServiceStatus.DOWN)
                             .count();
    return upCount <= 0L && downCount > 0L;
}
/**
 * Record a node as down if not already recorded and returns the node in the new state.
 * This assumes the node is found in the node
 * repo and that the node is allocated. If we get here otherwise something is truly odd.
 */
private Node recordAsDown(Node node) {
    if (node.history().event(History.Event.Type.down).isPresent()) return node; // already down: nothing to record
    // Take the owning application's lock and re-read the node so we do not overwrite concurrent updates.
    try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
        node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
        return nodeRepository().write(node.downAt(clock.instant()));
    }
}
/** Removes any "down" history record from the node, re-reading it under the owning application's lock. */
private void clearDownRecord(Node node) {
    if ( ! node.history().event(History.Event.Type.down).isPresent()) return; // nothing to clear
    // Take the owning application's lock and re-read the node so we do not overwrite concurrent updates.
    try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
        node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
        nodeRepository().write(node.up());
    }
}
/**
 * Called when a node should be moved to the failed state: Do that if it seems safe,
 * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
 * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
 *
 * @param node   an active node to fail
 * @param reason human-readable reason, recorded in the node's history
 * @return whether node was successfully failed
 */
private boolean failActive(Node node, String reason) {
    Optional<Deployment> deployment =
        deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
    if ( ! deployment.isPresent()) return false; // no local active deployment to redeploy — cannot fail safely
    try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
        // If this node hosts children, fail them out first. Active children are failed
        // recursively so each is only failed when its redeployment succeeds.
        boolean allTenantNodesFailedOutSuccessfully = true;
        String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
        for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
            if (failingTenantNode.state() == Node.State.active) {
                allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
            } else {
                nodeRepository().fail(failingTenantNode.hostname(), Agent.system, reasonForChildFailure);
            }
        }
        if (! allTenantNodesFailedOutSuccessfully) return false;
        node = nodeRepository().fail(node.hostname(), Agent.system, reason);
        try {
            deployment.get().activate(); // redeploy the application without the failed node
            return true;
        }
        catch (RuntimeException e) {
            // Redeployment failed: put the node back to active so its capacity is not lost,
            // and surface the failure in the log.
            nodeRepository().reactivate(node.hostname(), Agent.system,
                                        "Failed to redeploy after being failed by NodeFailer");
            log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                                   ", but redeploying without the node failed", e);
            return false;
        }
    }
}
/**
 * Throttling policy for node failing: bounds how many nodes may be failed within a time window.
 * (The previous comment here described the {@code throttle} method, not this enum.)
 */
public enum ThrottlePolicy {

    hosted(Duration.ofDays(1), 0.02, 2),
    disabled(Duration.ZERO, 0, 0);

    // Sliding window over which recent failures are counted.
    private final Duration throttleWindow;
    // Fraction of the total node count allowed to fail within the window.
    private final double fractionAllowedToFail;
    // Lower bound on the number of nodes allowed to fail within the window.
    private final int minimumAllowedToFail;

    ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
        this.throttleWindow = throttleWindow;
        this.fractionAllowedToFail = fractionAllowedToFail;
        this.minimumAllowedToFail = minimumAllowedToFail;
    }

    /** Returns the number of nodes allowed to fail within the window, given the total node count. */
    public int allowedToFailOf(int totalNodes) {
        return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
    }

    /** Returns a human-readable description of this policy for the given total node count. */
    public String toHumanReadableString(int totalNodes) {
        return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
                             allowedToFailOf(totalNodes),
                             minimumAllowedToFail, throttleWindow);
    }
}
} |
That is precisely why I used the `parentHostname().isPresent()` check: bare-metal (BM) tenant nodes should not be throttled in this case. | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
List<Node> nodes = nodeRepository().getNodes();
// Nodes (in any state) with a "failed" event inside the throttle window.
NodeList recentlyFailedNodes = nodes.stream()
        .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
        .collect(collectingAndThen(Collectors.toList(), NodeList::new));
// Below the windowed allowance: no throttling.
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
// Nodes without a parent host are additionally exempt while fewer than minimumAllowedToFail
// parent nodes failed recently.
// NOTE(review): presumably this is meant to keep bare-metal tenant nodes unthrottled — confirm intent.
if (!node.parentHostname().isPresent() &&
    recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
                       throttlePolicy.toHumanReadableString(nodes.size())));
return true;
} | if (!node.parentHostname().isPresent() && | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
List<Node> nodes = nodeRepository().getNodes();
NodeList recentlyFailedNodes = nodes.stream()
.filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
.collect(collectingAndThen(Collectors.toList(), NodeList::new));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
if (!node.parentHostname().isPresent() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(nodes.size())));
return true;
} | class NodeFailer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
public static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
public static final String throttlingActiveMetric = "nodeFailThrottling";
/** Provides information about the status of ready hosts */
private final HostLivenessTracker hostLivenessTracker;
/** Provides (more accurate) information about the status of active hosts */
private final ServiceMonitor serviceMonitor;
private final Deployer deployer;
private final Duration downTimeLimit;
private final Clock clock;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
private final ConfigserverConfig configserverConfig;
public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric,
JobControl jobControl,
ConfigserverConfig configserverConfig) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
this.deployer = deployer;
this.hostLivenessTracker = hostLivenessTracker;
this.serviceMonitor = serviceMonitor;
this.downTimeLimit = downTimeLimit;
this.clock = clock;
this.orchestrator = orchestrator;
this.constructionTime = clock.instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
this.configserverConfig = configserverConfig;
}
@Override
protected void maintain() {
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().lockUnallocated()) {
updateNodeLivenessEventsForReadyNodes();
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
nodeRepository().fail(node.hostname(), Agent.system, reason);
}
}
updateNodeDownState();
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (!failAllowedFor(node.type())) {
continue;
}
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
}
private void updateNodeLivenessEventsForReadyNodes() {
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
if ( ! lastLocalRequest.isPresent()) continue;
if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
History updatedHistory = node.history()
.with(new History.Event(History.Event.Type.requested, Agent.system, lastLocalRequest.get()));
nodeRepository().write(node.with(updatedHistory));
}
}
}
private Map<Node, String> getReadyNodesByFailureReason() {
Instant oldestAcceptableRequestTime =
constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
Instant.EPOCH :
clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
nodesByFailureReason.put(node, "Not receiving config requests from node");
} else if (node.status().hardwareFailureDescription().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware failure");
} else if (node.status().hardwareDivergence().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware divergence");
}
}
return nodesByFailureReason;
}
/**
* If the node is down (see {@link
* Otherwise we remove any "down" history record.
*/
private void updateNodeDownState() {
Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
.collect(Collectors.toMap(Node::hostname, node -> node));
serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
.forEach((hostName, serviceInstances) -> {
Node node = activeNodesByHostname.get(hostName.s());
if (node == null) return;
if (badNode(serviceInstances)) {
recordAsDown(node);
} else {
clearDownRecord(node);
}
});
}
private Map<Node, String> getActiveNodesByFailureReason() {
Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.active)) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
} else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
nodesByFailureReason.put(node, "Node has hardware failure");
}
}
return nodesByFailureReason;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
} catch (HostNameNotFoundException e) {
return false;
}
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* However, nodes of other types are not replaced (because all of the type are used by a single application),
* so we only allow one to be in failed at any point in time to protect against runaway failing.
*/
private boolean failAllowedFor(NodeType nodeType) {
if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
}
/**
* Returns true if the node is considered bad: all monitored services services are down.
* If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
*/
public static boolean badNode(List<ServiceInstance> services) {
Map<ServiceStatus, Long> countsByStatus = services.stream()
.collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
}
/**
* Record a node as down if not already recorded and returns the node in the new state.
* This assumes the node is found in the node
* repo and that the node is allocated. If we get here otherwise something is truly odd.
*/
private Node recordAsDown(Node node) {
if (node.history().event(History.Event.Type.down).isPresent()) return node;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
return nodeRepository().write(node.downAt(clock.instant()));
}
}
private void clearDownRecord(Node node) {
if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
nodeRepository().write(node.up());
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if ( ! deployment.isPresent()) return false;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().fail(failingTenantNode.hostname(), Agent.system, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
node = nodeRepository().fail(node.hostname(), Agent.system, reason);
try {
deployment.get().activate();
return true;
}
catch (RuntimeException e) {
nodeRepository().reactivate(node.hostname(), Agent.system,
"Failed to redeploy after being failed by NodeFailer");
log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
", but redeploying without the node failed", e);
return false;
}
}
}
/** Returns true if node failing should be throttled */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} | class NodeFailer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
public static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
public static final String throttlingActiveMetric = "nodeFailThrottling";
/** Provides information about the status of ready hosts */
private final HostLivenessTracker hostLivenessTracker;
/** Provides (more accurate) information about the status of active hosts */
private final ServiceMonitor serviceMonitor;
private final Deployer deployer;
private final Duration downTimeLimit;
private final Clock clock;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
private final ConfigserverConfig configserverConfig;
public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric,
JobControl jobControl,
ConfigserverConfig configserverConfig) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
this.deployer = deployer;
this.hostLivenessTracker = hostLivenessTracker;
this.serviceMonitor = serviceMonitor;
this.downTimeLimit = downTimeLimit;
this.clock = clock;
this.orchestrator = orchestrator;
this.constructionTime = clock.instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
this.configserverConfig = configserverConfig;
}
@Override
protected void maintain() {
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().lockUnallocated()) {
updateNodeLivenessEventsForReadyNodes();
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
nodeRepository().fail(node.hostname(), Agent.system, reason);
}
}
updateNodeDownState();
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (!failAllowedFor(node.type())) {
continue;
}
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
}
private void updateNodeLivenessEventsForReadyNodes() {
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
if ( ! lastLocalRequest.isPresent()) continue;
if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
History updatedHistory = node.history()
.with(new History.Event(History.Event.Type.requested, Agent.system, lastLocalRequest.get()));
nodeRepository().write(node.with(updatedHistory));
}
}
}
private Map<Node, String> getReadyNodesByFailureReason() {
Instant oldestAcceptableRequestTime =
constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
Instant.EPOCH :
clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
nodesByFailureReason.put(node, "Not receiving config requests from node");
} else if (node.status().hardwareFailureDescription().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware failure");
} else if (node.status().hardwareDivergence().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware divergence");
}
}
return nodesByFailureReason;
}
/**
* If the node is down (see {@link
* Otherwise we remove any "down" history record.
*/
private void updateNodeDownState() {
Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
.collect(Collectors.toMap(Node::hostname, node -> node));
serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
.forEach((hostName, serviceInstances) -> {
Node node = activeNodesByHostname.get(hostName.s());
if (node == null) return;
if (badNode(serviceInstances)) {
recordAsDown(node);
} else {
clearDownRecord(node);
}
});
}
private Map<Node, String> getActiveNodesByFailureReason() {
Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.active)) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
} else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
nodesByFailureReason.put(node, "Node has hardware failure");
}
}
return nodesByFailureReason;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
} catch (HostNameNotFoundException e) {
return false;
}
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* However, nodes of other types are not replaced (because all of the type are used by a single application),
* so we only allow one to be in failed at any point in time to protect against runaway failing.
*/
private boolean failAllowedFor(NodeType nodeType) {
if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
}
/**
* Returns true if the node is considered bad: all monitored services services are down.
* If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
*/
public static boolean badNode(List<ServiceInstance> services) {
Map<ServiceStatus, Long> countsByStatus = services.stream()
.collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
}
/**
* Record a node as down if not already recorded and returns the node in the new state.
* This assumes the node is found in the node
* repo and that the node is allocated. If we get here otherwise something is truly odd.
*/
private Node recordAsDown(Node node) {
if (node.history().event(History.Event.Type.down).isPresent()) return node;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
return nodeRepository().write(node.downAt(clock.instant()));
}
}
private void clearDownRecord(Node node) {
if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
nodeRepository().write(node.up());
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if ( ! deployment.isPresent()) return false;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().fail(failingTenantNode.hostname(), Agent.system, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
node = nodeRepository().fail(node.hostname(), Agent.system, reason);
try {
deployment.get().activate();
return true;
}
catch (RuntimeException e) {
nodeRepository().reactivate(node.hostname(), Agent.system,
"Failed to redeploy after being failed by NodeFailer");
log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
", but redeploying without the node failed", e);
return false;
}
}
}
/** Returns true if node failing should be throttled */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} |
Ah, maybe the comments should refer to this as physical hosts rather than parents then? | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
List<Node> nodes = nodeRepository().getNodes();
NodeList recentlyFailedNodes = nodes.stream()
.filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
.collect(collectingAndThen(Collectors.toList(), NodeList::new));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
if (!node.parentHostname().isPresent() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(nodes.size())));
return true;
} | if (!node.parentHostname().isPresent() && | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
List<Node> nodes = nodeRepository().getNodes();
NodeList recentlyFailedNodes = nodes.stream()
.filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
.collect(collectingAndThen(Collectors.toList(), NodeList::new));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
if (!node.parentHostname().isPresent() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(nodes.size())));
return true;
} | class NodeFailer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
public static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
public static final String throttlingActiveMetric = "nodeFailThrottling";
/** Provides information about the status of ready hosts */
private final HostLivenessTracker hostLivenessTracker;
/** Provides (more accurate) information about the status of active hosts */
private final ServiceMonitor serviceMonitor;
private final Deployer deployer;
private final Duration downTimeLimit;
private final Clock clock;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
private final ConfigserverConfig configserverConfig;
public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric,
JobControl jobControl,
ConfigserverConfig configserverConfig) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
this.deployer = deployer;
this.hostLivenessTracker = hostLivenessTracker;
this.serviceMonitor = serviceMonitor;
this.downTimeLimit = downTimeLimit;
this.clock = clock;
this.orchestrator = orchestrator;
this.constructionTime = clock.instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
this.configserverConfig = configserverConfig;
}
@Override
protected void maintain() {
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().lockUnallocated()) {
updateNodeLivenessEventsForReadyNodes();
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
nodeRepository().fail(node.hostname(), Agent.system, reason);
}
}
updateNodeDownState();
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (!failAllowedFor(node.type())) {
continue;
}
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
}
private void updateNodeLivenessEventsForReadyNodes() {
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
if ( ! lastLocalRequest.isPresent()) continue;
if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
History updatedHistory = node.history()
.with(new History.Event(History.Event.Type.requested, Agent.system, lastLocalRequest.get()));
nodeRepository().write(node.with(updatedHistory));
}
}
}
private Map<Node, String> getReadyNodesByFailureReason() {
Instant oldestAcceptableRequestTime =
constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
Instant.EPOCH :
clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
nodesByFailureReason.put(node, "Not receiving config requests from node");
} else if (node.status().hardwareFailureDescription().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware failure");
} else if (node.status().hardwareDivergence().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware divergence");
}
}
return nodesByFailureReason;
}
/**
* If the node is down (see {@link
* Otherwise we remove any "down" history record.
*/
private void updateNodeDownState() {
Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
.collect(Collectors.toMap(Node::hostname, node -> node));
serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
.forEach((hostName, serviceInstances) -> {
Node node = activeNodesByHostname.get(hostName.s());
if (node == null) return;
if (badNode(serviceInstances)) {
recordAsDown(node);
} else {
clearDownRecord(node);
}
});
}
private Map<Node, String> getActiveNodesByFailureReason() {
Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.active)) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
} else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
nodesByFailureReason.put(node, "Node has hardware failure");
}
}
return nodesByFailureReason;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
} catch (HostNameNotFoundException e) {
return false;
}
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* However, nodes of other types are not replaced (because all of the type are used by a single application),
* so we only allow one to be in failed at any point in time to protect against runaway failing.
*/
private boolean failAllowedFor(NodeType nodeType) {
if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
}
/**
* Returns true if the node is considered bad: all monitored services services are down.
* If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
*/
public static boolean badNode(List<ServiceInstance> services) {
Map<ServiceStatus, Long> countsByStatus = services.stream()
.collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
}
/**
* Record a node as down if not already recorded and returns the node in the new state.
* This assumes the node is found in the node
* repo and that the node is allocated. If we get here otherwise something is truly odd.
*/
private Node recordAsDown(Node node) {
if (node.history().event(History.Event.Type.down).isPresent()) return node;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
return nodeRepository().write(node.downAt(clock.instant()));
}
}
private void clearDownRecord(Node node) {
if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
nodeRepository().write(node.up());
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if ( ! deployment.isPresent()) return false;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().fail(failingTenantNode.hostname(), Agent.system, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
node = nodeRepository().fail(node.hostname(), Agent.system, reason);
try {
deployment.get().activate();
return true;
}
catch (RuntimeException e) {
nodeRepository().reactivate(node.hostname(), Agent.system,
"Failed to redeploy after being failed by NodeFailer");
log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
", but redeploying without the node failed", e);
return false;
}
}
}
/** Returns true if node failing should be throttled */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} | class NodeFailer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
public static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
public static final String throttlingActiveMetric = "nodeFailThrottling";
/** Provides information about the status of ready hosts */
private final HostLivenessTracker hostLivenessTracker;
/** Provides (more accurate) information about the status of active hosts */
private final ServiceMonitor serviceMonitor;
private final Deployer deployer;
private final Duration downTimeLimit;
private final Clock clock;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
private final ConfigserverConfig configserverConfig;
public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric,
JobControl jobControl,
ConfigserverConfig configserverConfig) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
this.deployer = deployer;
this.hostLivenessTracker = hostLivenessTracker;
this.serviceMonitor = serviceMonitor;
this.downTimeLimit = downTimeLimit;
this.clock = clock;
this.orchestrator = orchestrator;
this.constructionTime = clock.instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
this.configserverConfig = configserverConfig;
}
@Override
protected void maintain() {
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().lockUnallocated()) {
updateNodeLivenessEventsForReadyNodes();
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
nodeRepository().fail(node.hostname(), Agent.system, reason);
}
}
updateNodeDownState();
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (!failAllowedFor(node.type())) {
continue;
}
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
}
private void updateNodeLivenessEventsForReadyNodes() {
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
if ( ! lastLocalRequest.isPresent()) continue;
if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
History updatedHistory = node.history()
.with(new History.Event(History.Event.Type.requested, Agent.system, lastLocalRequest.get()));
nodeRepository().write(node.with(updatedHistory));
}
}
}
private Map<Node, String> getReadyNodesByFailureReason() {
Instant oldestAcceptableRequestTime =
constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
Instant.EPOCH :
clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
nodesByFailureReason.put(node, "Not receiving config requests from node");
} else if (node.status().hardwareFailureDescription().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware failure");
} else if (node.status().hardwareDivergence().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware divergence");
}
}
return nodesByFailureReason;
}
/**
* If the node is down (see {@link
* Otherwise we remove any "down" history record.
*/
private void updateNodeDownState() {
Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
.collect(Collectors.toMap(Node::hostname, node -> node));
serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
.forEach((hostName, serviceInstances) -> {
Node node = activeNodesByHostname.get(hostName.s());
if (node == null) return;
if (badNode(serviceInstances)) {
recordAsDown(node);
} else {
clearDownRecord(node);
}
});
}
private Map<Node, String> getActiveNodesByFailureReason() {
Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.active)) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
} else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
nodesByFailureReason.put(node, "Node has hardware failure");
}
}
return nodesByFailureReason;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
} catch (HostNameNotFoundException e) {
return false;
}
}
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* However, nodes of other types are not replaced (because all of the type are used by a single application),
* so we only allow one to be in failed at any point in time to protect against runaway failing.
*/
private boolean failAllowedFor(NodeType nodeType) {
if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
}
/**
* Returns true if the node is considered bad: all monitored services services are down.
* If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
*/
public static boolean badNode(List<ServiceInstance> services) {
Map<ServiceStatus, Long> countsByStatus = services.stream()
.collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
}
/**
* Record a node as down if not already recorded and returns the node in the new state.
* This assumes the node is found in the node
* repo and that the node is allocated. If we get here otherwise something is truly odd.
*/
private Node recordAsDown(Node node) {
if (node.history().event(History.Event.Type.down).isPresent()) return node;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
return nodeRepository().write(node.downAt(clock.instant()));
}
}
private void clearDownRecord(Node node) {
if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
nodeRepository().write(node.up());
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if ( ! deployment.isPresent()) return false;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().fail(failingTenantNode.hostname(), Agent.system, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
node = nodeRepository().fail(node.hostname(), Agent.system, reason);
try {
deployment.get().activate();
return true;
}
catch (RuntimeException e) {
nodeRepository().reactivate(node.hostname(), Agent.system,
"Failed to redeploy after being failed by NodeFailer");
log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
", but redeploying without the node failed", e);
return false;
}
}
}
/** Returns true if node failing should be throttled */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} |
Sure. | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
List<Node> nodes = nodeRepository().getNodes();
NodeList recentlyFailedNodes = nodes.stream()
.filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
.collect(collectingAndThen(Collectors.toList(), NodeList::new));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
if (!node.parentHostname().isPresent() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(nodes.size())));
return true;
} | if (!node.parentHostname().isPresent() && | private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
List<Node> nodes = nodeRepository().getNodes();
NodeList recentlyFailedNodes = nodes.stream()
.filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
.collect(collectingAndThen(Collectors.toList(), NodeList::new));
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
if (!node.parentHostname().isPresent() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
throttlePolicy.toHumanReadableString(nodes.size())));
return true;
} | class NodeFailer extends Maintainer {
    private static final Logger log = Logger.getLogger(NodeFailer.class.getName());

    // Interval used when deciding how recently a ready node must have requested config (see getReadyNodesByFailureReason)
    private static final Duration nodeRequestInterval = Duration.ofMinutes(10);

    /** Metric for number of nodes that we want to fail, but cannot due to throttling */
    public static final String throttledNodeFailuresMetric = "throttledNodeFailures";

    /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
    public static final String throttlingActiveMetric = "nodeFailThrottling";

    /** Provides information about the status of ready hosts */
    private final HostLivenessTracker hostLivenessTracker;

    /** Provides (more accurate) information about the status of active hosts */
    private final ServiceMonitor serviceMonitor;

    private final Deployer deployer;
    /** How long a node must have been down before it is eligible for failing */
    private final Duration downTimeLimit;
    private final Clock clock;
    private final Orchestrator orchestrator;
    /** When this maintainer was constructed; used to suppress liveness checks right after start */
    private final Instant constructionTime;
    private final ThrottlePolicy throttlePolicy;
    private final Metric metric;
    private final ConfigserverConfig configserverConfig;

    public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
                      ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                      Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
                      ThrottlePolicy throttlePolicy, Metric metric,
                      JobControl jobControl,
                      ConfigserverConfig configserverConfig) {
        // Run at least every 5 minutes, and at least twice within the down time limit
        super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
        this.deployer = deployer;
        this.hostLivenessTracker = hostLivenessTracker;
        this.serviceMonitor = serviceMonitor;
        this.downTimeLimit = downTimeLimit;
        this.clock = clock;
        this.orchestrator = orchestrator;
        this.constructionTime = clock.instant();
        this.throttlePolicy = throttlePolicy;
        this.metric = metric;
        this.configserverConfig = configserverConfig;
    }
    /**
     * Runs one failure-detection pass: fails eligible ready nodes (while holding the
     * unallocated-nodes lock), then refreshes down-state for active nodes and fails those
     * eligible, both subject to throttling. Finally publishes throttling metrics.
     */
    @Override
    protected void maintain() {
        int throttledNodeFailures = 0;

        // Ready nodes: checked and failed under the unallocated-nodes lock
        try (Mutex lock = nodeRepository().lockUnallocated()) {
            updateNodeLivenessEventsForReadyNodes();

            for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
                Node node = entry.getKey();
                if (throttle(node)) {
                    throttledNodeFailures++;
                    continue;
                }
                String reason = entry.getValue();
                nodeRepository().fail(node.hostname(), Agent.system, reason);
            }
        }

        // Active nodes
        updateNodeDownState();
        for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
            Node node = entry.getKey();
            if (!failAllowedFor(node.type())) {
                continue;
            }
            if (throttle(node)) {
                throttledNodeFailures++;
                continue;
            }
            String reason = entry.getValue();
            failActive(node, reason);
        }

        // 1 if any failure was throttled in this pass, 0 otherwise
        metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
        metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
    }
/**
 * Records a 'requested' history event for each ready node which has made a config request
 * since its last recorded request event.
 */
private void updateNodeLivenessEventsForReadyNodes() {
    for (Node node : nodeRepository().getNodes(Node.State.ready)) {
        Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
        if (lastLocalRequest.isPresent()
                && ! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
            History.Event requested = new History.Event(History.Event.Type.requested, Agent.system, lastLocalRequest.get());
            nodeRepository().write(node.with(node.history().with(requested)));
        }
    }
}
    /**
     * Returns the ready nodes that should be failed, mapped to a human-readable reason.
     * Right after construction we cannot trust missing config requests, so the request-time
     * threshold is then set to EPOCH, which no node can fail.
     */
    private Map<Node, String> getReadyNodesByFailureReason() {
        Instant oldestAcceptableRequestTime =
                // In the warm-up window after construction, accept any request time
                constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
                        Instant.EPOCH :
                        clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);

        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
                nodesByFailureReason.put(node, "Not receiving config requests from node");
            } else if (node.status().hardwareFailureDescription().isPresent()) {
                nodesByFailureReason.put(node, "Node has hardware failure");
            } else if (node.status().hardwareDivergence().isPresent()) {
                nodesByFailureReason.put(node, "Node has hardware divergence");
            }
        }
        return nodesByFailureReason;
    }
    /**
     * If all monitored services of a node are down (see {@link #badNode(List)}), records a "down"
     * history event for it. Otherwise we remove any "down" history record.
     * (NOTE(review): the original {@link} reference here was truncated — confirm the intended target.)
     */
    private void updateNodeDownState() {
        Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
                .collect(Collectors.toMap(Node::hostname, node -> node));

        serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
                .forEach((hostName, serviceInstances) -> {
                    Node node = activeNodesByHostname.get(hostName.s());
                    if (node == null) return; // host has no active node in the repo

                    if (badNode(serviceInstances)) {
                        recordAsDown(node);
                    } else {
                        clearDownRecord(node);
                    }
                });
    }
/** Returns the active nodes that should be failed, mapped to a human-readable reason. */
private Map<Node, String> getActiveNodesByFailureReason() {
    Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
    Map<Node, String> reasons = new HashMap<>();
    for (Node node : nodeRepository().getNodes(Node.State.active)) {
        boolean downTooLong = node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd)
                              && ! applicationSuspended(node);
        if (downTooLong) {
            reasons.put(node, "Node has been down longer than " + downTimeLimit);
        } else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
            reasons.put(node, "Node has hardware failure");
        }
    }
    return reasons;
}
    /** Returns whether we expect config requests from this node; docker hosts only when node-admin runs in a container. */
    private boolean expectConfigRequests(Node node) {
        return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
    }

    /** Returns whether the node has (or may yet) requested config after the given instant. */
    private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
        // A node readied after the threshold has not had time to make a request yet
        return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
    }

    /** Returns whether the node has a 'readied' history event before the given instant. */
    private boolean wasMadeReadyBefore(Node node, Instant instant) {
        return node.history().hasEventBefore(History.Event.Type.readied, instant);
    }

    /** Returns whether a config request from the node is recorded after the given instant. */
    private boolean hasRecordedRequestAfter(Node node, Instant instant) {
        return node.history().hasEventAfter(History.Event.Type.requested, instant);
    }
    /** Returns whether the application owning this node is allowed by the orchestrator to be down. */
    private boolean applicationSuspended(Node node) {
        try {
            return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
                   == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
        } catch (ApplicationIdNotFoundException e) {
            // Unknown to the orchestrator: treat as not suspended
            return false;
        }
    }

    /** Returns whether this node is allowed by the orchestrator to be down. */
    private boolean nodeSuspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
        } catch (HostNameNotFoundException e) {
            // Unknown to the orchestrator: treat as not suspended
            return false;
        }
    }
/**
* We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
* unless the node is replaced.
* However, nodes of other types are not replaced (because all of the type are used by a single application),
* so we only allow one to be in failed at any point in time to protect against runaway failing.
*/
private boolean failAllowedFor(NodeType nodeType) {
if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
}
/**
* Returns true if the node is considered bad: all monitored services services are down.
* If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
*/
public static boolean badNode(List<ServiceInstance> services) {
Map<ServiceStatus, Long> countsByStatus = services.stream()
.collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
}
    /**
     * Record a node as down if not already recorded and returns the node in the new state.
     * This assumes the node is found in the node
     * repo and that the node is allocated. If we get here otherwise something is truly odd.
     */
    private Node recordAsDown(Node node) {
        if (node.history().event(History.Event.Type.down).isPresent()) return node; // already recorded
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // Re-read under the application lock so we do not overwrite a concurrent change
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
            return nodeRepository().write(node.downAt(clock.instant()));
        }
    }
    /** Removes any "down" history record of this node. */
    private void clearDownRecord(Node node) {
        if ( ! node.history().event(History.Event.Type.down).isPresent()) return; // nothing to clear
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // Re-read under the application lock so we do not overwrite a concurrent change
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
            nodeRepository().write(node.up());
        }
    }
    /**
     * Called when a node should be moved to the failed state: Do that if it seems safe,
     * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
     * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
     *
     * @return whether node was successfully failed
     */
    private boolean failActive(Node node, String reason) {
        Optional<Deployment> deployment =
            deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
        if ( ! deployment.isPresent()) return false; // cannot redeploy without an active local deployment

        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // First fail all active child (tenant) nodes; recurse so each gets its own redeploy
            boolean allTenantNodesFailedOutSuccessfully = true;
            String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
            for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
                if (failingTenantNode.state() == Node.State.active) {
                    allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
                } else {
                    nodeRepository().fail(failingTenantNode.hostname(), Agent.system, reasonForChildFailure);
                }
            }

            if (! allTenantNodesFailedOutSuccessfully) return false;
            node = nodeRepository().fail(node.hostname(), Agent.system, reason);
            try {
                // Redeploy to replace the now-failed node
                deployment.get().activate();
                return true;
            }
            catch (RuntimeException e) {
                // Redeploy failed: roll the node back to active so the application is not left short
                nodeRepository().reactivate(node.hostname(), Agent.system,
                                            "Failed to redeploy after being failed by NodeFailer");
                log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                                       ", but redeploying without the node failed", e);
                return false;
            }
        }
    }
    /** Policy for how many nodes may be failed within a time window before further failing is throttled. */
    public enum ThrottlePolicy {

        hosted(Duration.ofDays(1), 0.02, 2),
        disabled(Duration.ZERO, 0, 0);

        /** The sliding time window over which recent failures are counted */
        private final Duration throttleWindow;
        /** Fraction of all nodes allowed to fail within the window */
        private final double fractionAllowedToFail;
        /** Lower bound on the number of nodes allowed to fail within the window */
        private final int minimumAllowedToFail;

        ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
            this.throttleWindow = throttleWindow;
            this.fractionAllowedToFail = fractionAllowedToFail;
            this.minimumAllowedToFail = minimumAllowedToFail;
        }

        /** Returns the number of nodes allowed to fail within the window, given the total node count. */
        public int allowedToFailOf(int totalNodes) {
            return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
        }

        /** Returns a human-readable description of this policy for the given total node count. */
        public String toHumanReadableString(int totalNodes) {
            return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
                                 allowedToFailOf(totalNodes),
                                 minimumAllowedToFail, throttleWindow);
        }
    }
} | class NodeFailer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
private static final Duration nodeRequestInterval = Duration.ofMinutes(10);
/** Metric for number of nodes that we want to fail, but cannot due to throttling */
public static final String throttledNodeFailuresMetric = "throttledNodeFailures";
/** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
public static final String throttlingActiveMetric = "nodeFailThrottling";
/** Provides information about the status of ready hosts */
private final HostLivenessTracker hostLivenessTracker;
/** Provides (more accurate) information about the status of active hosts */
private final ServiceMonitor serviceMonitor;
private final Deployer deployer;
private final Duration downTimeLimit;
private final Clock clock;
private final Orchestrator orchestrator;
private final Instant constructionTime;
private final ThrottlePolicy throttlePolicy;
private final Metric metric;
private final ConfigserverConfig configserverConfig;
public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
ThrottlePolicy throttlePolicy, Metric metric,
JobControl jobControl,
ConfigserverConfig configserverConfig) {
super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
this.deployer = deployer;
this.hostLivenessTracker = hostLivenessTracker;
this.serviceMonitor = serviceMonitor;
this.downTimeLimit = downTimeLimit;
this.clock = clock;
this.orchestrator = orchestrator;
this.constructionTime = clock.instant();
this.throttlePolicy = throttlePolicy;
this.metric = metric;
this.configserverConfig = configserverConfig;
}
@Override
protected void maintain() {
int throttledNodeFailures = 0;
try (Mutex lock = nodeRepository().lockUnallocated()) {
updateNodeLivenessEventsForReadyNodes();
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
nodeRepository().fail(node.hostname(), Agent.system, reason);
}
}
updateNodeDownState();
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (!failAllowedFor(node.type())) {
continue;
}
if (throttle(node)) {
throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
}
private void updateNodeLivenessEventsForReadyNodes() {
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
if ( ! lastLocalRequest.isPresent()) continue;
if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
History updatedHistory = node.history()
.with(new History.Event(History.Event.Type.requested, Agent.system, lastLocalRequest.get()));
nodeRepository().write(node.with(updatedHistory));
}
}
}
private Map<Node, String> getReadyNodesByFailureReason() {
Instant oldestAcceptableRequestTime =
constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
Instant.EPOCH :
clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.ready)) {
if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
nodesByFailureReason.put(node, "Not receiving config requests from node");
} else if (node.status().hardwareFailureDescription().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware failure");
} else if (node.status().hardwareDivergence().isPresent()) {
nodesByFailureReason.put(node, "Node has hardware divergence");
}
}
return nodesByFailureReason;
}
/**
 * If all monitored services of a node are down (see badNode), records a "down" history event for it.
 * Otherwise we remove any "down" history record.
 * (NOTE(review): the original {@link} reference here was truncated — confirm the intended target.)
 */
private void updateNodeDownState() {
Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
.collect(Collectors.toMap(Node::hostname, node -> node));
serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
.forEach((hostName, serviceInstances) -> {
Node node = activeNodesByHostname.get(hostName.s());
if (node == null) return;
if (badNode(serviceInstances)) {
recordAsDown(node);
} else {
clearDownRecord(node);
}
});
}
private Map<Node, String> getActiveNodesByFailureReason() {
Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : nodeRepository().getNodes(Node.State.active)) {
if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
} else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
nodesByFailureReason.put(node, "Node has hardware failure");
}
}
return nodesByFailureReason;
}
private boolean expectConfigRequests(Node node) {
return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
}
private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
}
private boolean wasMadeReadyBefore(Node node, Instant instant) {
return node.history().hasEventBefore(History.Event.Type.readied, instant);
}
private boolean hasRecordedRequestAfter(Node node, Instant instant) {
return node.history().hasEventAfter(History.Event.Type.requested, instant);
}
private boolean applicationSuspended(Node node) {
try {
return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
== ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
} catch (ApplicationIdNotFoundException e) {
return false;
}
}
private boolean nodeSuspended(Node node) {
try {
return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
} catch (HostNameNotFoundException e) {
return false;
}
}
/**
 * We can attempt to fail any number of *tenant* and *host* nodes, because failing one of those
 * has no effect unless the node can be replaced.
 * However, nodes of other types are not replaced (because all of the type are used by a single application),
 * so we only allow one to be in failed at any point in time to protect against runaway failing.
 */
private boolean failAllowedFor(NodeType nodeType) {
if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
}
/**
 * Returns true if the node is considered bad: all monitored services are down.
 * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
 */
public static boolean badNode(List<ServiceInstance> services) {
Map<ServiceStatus, Long> countsByStatus = services.stream()
.collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
}
/**
* Record a node as down if not already recorded and returns the node in the new state.
* This assumes the node is found in the node
* repo and that the node is allocated. If we get here otherwise something is truly odd.
*/
private Node recordAsDown(Node node) {
if (node.history().event(History.Event.Type.down).isPresent()) return node;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
return nodeRepository().write(node.downAt(clock.instant()));
}
}
private void clearDownRecord(Node node) {
if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
nodeRepository().write(node.up());
}
}
/**
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
*
* @return whether node was successfully failed
*/
private boolean failActive(Node node, String reason) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
if ( ! deployment.isPresent()) return false;
try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
if (failingTenantNode.state() == Node.State.active) {
allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
} else {
nodeRepository().fail(failingTenantNode.hostname(), Agent.system, reasonForChildFailure);
}
}
if (! allTenantNodesFailedOutSuccessfully) return false;
node = nodeRepository().fail(node.hostname(), Agent.system, reason);
try {
deployment.get().activate();
return true;
}
catch (RuntimeException e) {
nodeRepository().reactivate(node.hostname(), Agent.system,
"Failed to redeploy after being failed by NodeFailer");
log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
", but redeploying without the node failed", e);
return false;
}
}
}
/** Policy for how many nodes may be failed within a time window before further failing is throttled. */
public enum ThrottlePolicy {
hosted(Duration.ofDays(1), 0.02, 2),
disabled(Duration.ZERO, 0, 0);
private final Duration throttleWindow;
private final double fractionAllowedToFail;
private final int minimumAllowedToFail;
ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
this.throttleWindow = throttleWindow;
this.fractionAllowedToFail = fractionAllowedToFail;
this.minimumAllowedToFail = minimumAllowedToFail;
}
public int allowedToFailOf(int totalNodes) {
return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
}
public String toHumanReadableString(int totalNodes) {
return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
allowedToFailOf(totalNodes),
minimumAllowedToFail, throttleWindow);
}
}
} |
In order to get the flag value for a particular application, you need to tell the flag what application this is for before value() with a call to `with(Dimension.APPLICATION_ID, anApplicationId.serializedForm())` | private NodesSpecification createNodesSpecificationForLogserver() {
DeployState deployState = context.getDeployState();
if (deployState.getProperties().useDedicatedNodeForLogserver() &&
context.getApplicationType() == ConfigModelContext.ApplicationType.DEFAULT &&
deployState.isHosted() &&
Flags.ENABLE_LOGSERVER.bindTo(deployState.flagSource()).value())
return NodesSpecification.dedicated(1, context);
else
return NodesSpecification.nonDedicated(1, context);
} | Flags.ENABLE_LOGSERVER.bindTo(deployState.flagSource()).value()) | private NodesSpecification createNodesSpecificationForLogserver() {
        DeployState deployState = context.getDeployState();
        // Dedicated logserver node only for hosted, default-type applications that request one,
        // and only when the feature flag is set for this particular application
        if (deployState.getProperties().useDedicatedNodeForLogserver() &&
            context.getApplicationType() == ConfigModelContext.ApplicationType.DEFAULT &&
            deployState.isHosted() &&
            // NOTE(review): logServerFlagValue is not visible in this chunk — confirm it binds
            // the APPLICATION_ID dimension before calling value()
            logServerFlagValue(deployState))
            return NodesSpecification.dedicated(1, context);
        else
            return NodesSpecification.nonDedicated(1, context);
} | class DomAdminV4Builder extends DomAdminBuilderBase {
private ApplicationId ZONE_APPLICATION_ID = ApplicationId.from("hosted-vespa", "routing", "default");
private final Collection<ContainerModel> containerModels;
private final ConfigModelContext context;
public DomAdminV4Builder(ConfigModelContext context, boolean multitenant, List<ConfigServerSpec> configServerSpecs,
Collection<ContainerModel> containerModels) {
super(context.getApplicationType(), context.getDeployState().getFileRegistry(), multitenant,
configServerSpecs);
this.containerModels = containerModels;
this.context = context;
}
    /** Builds the admin model: config servers, slobroks, logserver and log forwarders. */
    @Override
    protected void doBuildAdmin(DeployState deployState, Admin admin, Element w3cAdminElement) {
        ModelElement adminElement = new ModelElement(w3cAdminElement);
        admin.addConfigservers(getConfigServersFromSpec(deployState.getDeployLogger(), admin));

        // Both elements are optional; fall back to defaults when not specified
        Optional<NodesSpecification> requestedSlobroks =
                NodesSpecification.optionalDedicatedFromParent(adminElement.getChild("slobroks"), context);
        Optional<NodesSpecification> requestedLogservers =
                NodesSpecification.optionalDedicatedFromParent(adminElement.getChild("logservers"), context);
        assignSlobroks(deployState.getDeployLogger(), requestedSlobroks.orElse(NodesSpecification.nonDedicated(3, context)), admin);
        assignLogserver(deployState, requestedLogservers.orElse(createNodesSpecificationForLogserver()), admin);

        addLogForwarders(adminElement.getChild("logforwarding"), admin);
    }
/** Places slobroks on dedicated hosts when requested, otherwise spread across existing container hosts. */
private void assignSlobroks(DeployLogger deployLogger, NodesSpecification nodesSpecification, Admin admin) {
    Collection<HostResource> hosts = nodesSpecification.isDedicated()
            ? allocateHosts(admin.getHostSystem(), "slobroks", nodesSpecification)
            : pickContainerHostsForSlobrok(nodesSpecification.count(), 2);
    createSlobroks(deployLogger, admin, hosts);
}
private void assignLogserver(DeployState deployState, NodesSpecification nodesSpecification, Admin admin) {
if (nodesSpecification.count() > 1) throw new IllegalArgumentException("You can only request a single log server");
if (nodesSpecification.isDedicated()) {
Collection<HostResource> hosts = allocateHosts(admin.getHostSystem(), "logserver", nodesSpecification);
if (hosts.isEmpty()) return;
Logserver logserver = createLogserver(deployState.getDeployLogger(), admin, hosts);
createAdditionalContainerOnLogserverHost(deployState, admin, logserver.getHostResource());
} else if (containerModels.iterator().hasNext()) {
List<HostResource> hosts = sortedContainerHostsFrom(containerModels.iterator().next(), nodesSpecification.count(), false);
if (hosts.isEmpty()) return;
createLogserver(deployState.getDeployLogger(), admin, hosts);
} else {
context.getDeployLogger().log(LogLevel.INFO, "No container host available to use for running logserver");
}
}
    /** Creates a container cluster with a single container on the logserver host, serving the log handler. */
    private void createAdditionalContainerOnLogserverHost(DeployState deployState, Admin admin, HostResource hostResource) {
        ContainerCluster logServerCluster = new ContainerCluster(admin, "logserver-cluster", "logserver-cluster", deployState);
        ContainerModel logserverClusterModel = new ContainerModel(context.withParent(admin).withId(logServerCluster.getSubId()));

        // Standard handlers plus the log handler
        logServerCluster.addMetricStateHandler();
        logServerCluster.addApplicationStatusHandler();
        logServerCluster.addDefaultRootHandler();
        logServerCluster.addVipHandler();
        addLogHandler(logServerCluster);
        logserverClusterModel.setCluster(logServerCluster);

        // A single container on the logserver host
        Container container = new Container(logServerCluster, "" + 0, 0, deployState.isHosted());
        container.setHostResource(hostResource);
        container.initService(deployState.getDeployLogger());
        logServerCluster.addContainer(container);
        admin.addAndInitializeService(deployState.getDeployLogger(), hostResource, container);
        admin.setLogserverContainerCluster(logServerCluster);
    }
    /** Adds the log handler component to the given cluster. */
    private void addLogHandler(ContainerCluster cluster) {
        Handler<?> logHandler = Handler.fromClassName("com.yahoo.container.handler.LogHandler");
        // NOTE(review): the binding string below is truncated in this copy of the file
        // (unterminated literal) — restore the full server binding pattern from the original source.
        logHandler.addServerBindings("http:
        cluster.addComponent(logHandler);
    }
    /** Provisions dedicated hosts for an admin cluster with the given id, per the given spec. */
    private Collection<HostResource> allocateHosts(HostSystem hostSystem, String clusterId, NodesSpecification nodesSpecification) {
        return nodesSpecification.provision(hostSystem,
                                            ClusterSpec.Type.admin,
                                            ClusterSpec.Id.from(clusterId),
                                            context.getDeployLogger()).keySet();
    }
    /**
     * Returns a list of container hosts to use for an auxiliary cluster.
     * The list returns the same nodes on each invocation given the same available nodes.
     *
     * @param count the desired number of nodes. More nodes may be returned to ensure a smooth transition
     *              on topology changes, and less nodes may be returned if fewer are available
     * @param minHostsPerContainerCluster the desired number of hosts per cluster
     */
    private List<HostResource> pickContainerHostsForSlobrok(int count, int minHostsPerContainerCluster) {
        Collection<ContainerModel> containerModelsWithSlobrok = containerModels.stream()
                .filter(this::shouldHaveSlobrok)
                .collect(Collectors.toList());
        // Spread the requested count evenly across eligible clusters, but never below the minimum
        int hostsPerCluster = (int) Math.max(minHostsPerContainerCluster,
                                             Math.ceil((double) count / containerModelsWithSlobrok.size()));

        List<HostResource> picked = new ArrayList<>();
        for (ContainerModel containerModel : containerModelsWithSlobrok)
            picked.addAll(pickContainerHostsFrom(containerModel, hostsPerCluster));
        return picked;
    }
/** Every cluster gets a slobrok, except the node-admin cluster of the zone application. */
private boolean shouldHaveSlobrok(ContainerModel containerModel) {
    ApplicationId applicationId = context.getDeployState().getProperties().applicationId();
    if ( ! applicationId.equals(ZONE_APPLICATION_ID)) return true;
    String clusterName = containerModel.getCluster().getName();
    return ! Objects.equals(clusterName, "node-admin");
}
/** Picks up to count non-retired hosts from the cluster, followed by up to count retired ones. */
private List<HostResource> pickContainerHostsFrom(ContainerModel model, int count) {
    List<HostResource> picked = sortedContainerHostsFrom(model, count, false);
    picked.addAll(sortedContainerHostsFrom(model, count, true));
    return picked;
}
    /** Returns the count first containers in the current model having isRetired set to the given value */
    private List<HostResource> sortedContainerHostsFrom(ContainerModel model, int count, boolean retired) {
        List<HostResource> hosts = model.getCluster().getContainers().stream()
                .filter(container -> retired == container.isRetired())
                .map(Container::getHostResource)
                .collect(Collectors.toList());
        return HostResource.pickHosts(hosts, count, 1);
    }
/** Creates the logserver on the first of the given hosts and registers it with the admin model. */
private Logserver createLogserver(DeployLogger deployLogger, Admin admin, Collection<HostResource> hosts) {
    Logserver logserver = new Logserver(admin);
    HostResource host = hosts.iterator().next();
    logserver.setHostResource(host);
    admin.setLogserver(logserver);
    logserver.initService(deployLogger);
    return logserver;
}
/** Creates one slobrok per given host and registers them all with the admin model. */
private void createSlobroks(DeployLogger deployLogger, Admin admin, Collection<HostResource> hosts) {
    if (hosts.isEmpty()) return; // No slobroks can be created (and none are needed)
    List<Slobrok> slobroks = new ArrayList<>();
    int index = 0;
    for (HostResource host : hosts) {
        Slobrok slobrok = new Slobrok(admin, index);
        index++;
        slobrok.setHostResource(host);
        slobrok.initService(deployLogger);
        slobroks.add(slobrok);
    }
    admin.addSlobroks(slobroks);
}
} | class DomAdminV4Builder extends DomAdminBuilderBase {
private ApplicationId ZONE_APPLICATION_ID = ApplicationId.from("hosted-vespa", "routing", "default");
private final Collection<ContainerModel> containerModels;
private final ConfigModelContext context;
public DomAdminV4Builder(ConfigModelContext context, boolean multitenant, List<ConfigServerSpec> configServerSpecs,
Collection<ContainerModel> containerModels) {
super(context.getApplicationType(), context.getDeployState().getFileRegistry(), multitenant,
configServerSpecs);
this.containerModels = containerModels;
this.context = context;
}
@Override
protected void doBuildAdmin(DeployState deployState, Admin admin, Element w3cAdminElement) {
ModelElement adminElement = new ModelElement(w3cAdminElement);
admin.addConfigservers(getConfigServersFromSpec(deployState.getDeployLogger(), admin));
Optional<NodesSpecification> requestedSlobroks =
NodesSpecification.optionalDedicatedFromParent(adminElement.getChild("slobroks"), context);
Optional<NodesSpecification> requestedLogservers =
NodesSpecification.optionalDedicatedFromParent(adminElement.getChild("logservers"), context);
assignSlobroks(deployState.getDeployLogger(), requestedSlobroks.orElse(NodesSpecification.nonDedicated(3, context)), admin);
assignLogserver(deployState, requestedLogservers.orElse(createNodesSpecificationForLogserver()), admin);
addLogForwarders(adminElement.getChild("logforwarding"), admin);
}
private void assignSlobroks(DeployLogger deployLogger, NodesSpecification nodesSpecification, Admin admin) {
if (nodesSpecification.isDedicated()) {
createSlobroks(deployLogger, admin, allocateHosts(admin.getHostSystem(), "slobroks", nodesSpecification));
}
else {
createSlobroks(deployLogger, admin, pickContainerHostsForSlobrok(nodesSpecification.count(), 2));
}
}
private void assignLogserver(DeployState deployState, NodesSpecification nodesSpecification, Admin admin) {
if (nodesSpecification.count() > 1) throw new IllegalArgumentException("You can only request a single log server");
if (nodesSpecification.isDedicated()) {
Collection<HostResource> hosts = allocateHosts(admin.getHostSystem(), "logserver", nodesSpecification);
if (hosts.isEmpty()) return;
Logserver logserver = createLogserver(deployState.getDeployLogger(), admin, hosts);
createAdditionalContainerOnLogserverHost(deployState, admin, logserver.getHostResource());
} else if (containerModels.iterator().hasNext()) {
List<HostResource> hosts = sortedContainerHostsFrom(containerModels.iterator().next(), nodesSpecification.count(), false);
if (hosts.isEmpty()) return;
createLogserver(deployState.getDeployLogger(), admin, hosts);
} else {
context.getDeployLogger().log(LogLevel.INFO, "No container host available to use for running logserver");
}
}
private void createAdditionalContainerOnLogserverHost(DeployState deployState, Admin admin, HostResource hostResource) {
ContainerCluster logServerCluster = new ContainerCluster(admin, "logserver-cluster", "logserver-cluster", deployState);
ContainerModel logserverClusterModel = new ContainerModel(context.withParent(admin).withId(logServerCluster.getSubId()));
logServerCluster.addMetricStateHandler();
logServerCluster.addApplicationStatusHandler();
logServerCluster.addDefaultRootHandler();
logServerCluster.addVipHandler();
addLogHandler(logServerCluster);
logserverClusterModel.setCluster(logServerCluster);
Container container = new Container(logServerCluster, "" + 0, 0, deployState.isHosted());
container.setHostResource(hostResource);
container.initService(deployState.getDeployLogger());
logServerCluster.addContainer(container);
admin.addAndInitializeService(deployState.getDeployLogger(), hostResource, container);
admin.setLogserverContainerCluster(logServerCluster);
}
private void addLogHandler(ContainerCluster cluster) {
Handler<?> logHandler = Handler.fromClassName("com.yahoo.container.handler.LogHandler");
logHandler.addServerBindings("http:
cluster.addComponent(logHandler);
}
private Collection<HostResource> allocateHosts(HostSystem hostSystem, String clusterId, NodesSpecification nodesSpecification) {
return nodesSpecification.provision(hostSystem,
ClusterSpec.Type.admin,
ClusterSpec.Id.from(clusterId),
context.getDeployLogger()).keySet();
}
/**
* Returns a list of container hosts to use for an auxiliary cluster.
* The list returns the same nodes on each invocation given the same available nodes.
*
* @param count the desired number of nodes. More nodes may be returned to ensure a smooth transition
* on topology changes, and less nodes may be returned if fewer are available
* @param minHostsPerContainerCluster the desired number of hosts per cluster
*/
private List<HostResource> pickContainerHostsForSlobrok(int count, int minHostsPerContainerCluster) {
Collection<ContainerModel> containerModelsWithSlobrok = containerModels.stream()
.filter(this::shouldHaveSlobrok)
.collect(Collectors.toList());
int hostsPerCluster = (int) Math.max(minHostsPerContainerCluster,
Math.ceil((double) count / containerModelsWithSlobrok.size()));
List<HostResource> picked = new ArrayList<>();
for (ContainerModel containerModel : containerModelsWithSlobrok)
picked.addAll(pickContainerHostsFrom(containerModel, hostsPerCluster));
return picked;
}
private boolean shouldHaveSlobrok(ContainerModel containerModel) {
ApplicationId applicationId = context.getDeployState().getProperties().applicationId();
if (!applicationId.equals(ZONE_APPLICATION_ID)) {
return true;
}
String clustername = containerModel.getCluster().getName();
return !Objects.equals(clustername, "node-admin");
}
private List<HostResource> pickContainerHostsFrom(ContainerModel model, int count) {
boolean retired = true;
List<HostResource> picked = sortedContainerHostsFrom(model, count, !retired);
picked.addAll(sortedContainerHostsFrom(model, count, retired));
return picked;
}
/** Returns the count first containers in the current model having isRetired set to the given value */
private List<HostResource> sortedContainerHostsFrom(ContainerModel model, int count, boolean retired) {
List<HostResource> hosts = model.getCluster().getContainers().stream()
.filter(container -> retired == container.isRetired())
.map(Container::getHostResource)
.collect(Collectors.toList());
return HostResource.pickHosts(hosts, count, 1);
}
private Logserver createLogserver(DeployLogger deployLogger, Admin admin, Collection<HostResource> hosts) {
Logserver logserver = new Logserver(admin);
logserver.setHostResource(hosts.iterator().next());
admin.setLogserver(logserver);
logserver.initService(deployLogger);
return logserver;
}
private void createSlobroks(DeployLogger deployLogger, Admin admin, Collection<HostResource> hosts) {
if (hosts.isEmpty()) return;
List<Slobrok> slobroks = new ArrayList<>();
int index = 0;
for (HostResource host : hosts) {
Slobrok slobrok = new Slobrok(admin, index++);
slobrok.setHostResource(host);
slobroks.add(slobrok);
slobrok.initService(deployLogger);
}
admin.addSlobroks(slobroks);
}
private boolean logServerFlagValue(DeployState deployState) {
return Flags.ENABLE_LOGSERVER.bindTo(deployState.flagSource())
.with(FetchVector.Dimension.APPLICATION_ID, deployState.getProperties().applicationId().serializedForm())
.value();
}
} |
For each agent in sequence, this would set frozen and then wait until completion (or timeout), and therefore take a time proportional to the number of agents running. Could we instead first set want frozen on all agents, and then wait on all? That would be capped to wait for the longest running agent. | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT)) | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
.collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
nodeAgentContextsByHostname.forEach((hostname, context) ->
nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context)
);
}
private void updateNodeAgentMetrics() {
int numberContainersWaitingImage = 0;
int numberOfNewUnhandledExceptions = 0;
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
}
numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
@Override
@Override
public boolean isFrozen() {
return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ofSeconds(0);
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(nodeAgent -> {
nodeAgent.suspend();
nodeAgent.stopServices();
});
}
@Override
public void start() {
metricsScheduler.scheduleAtFixedRate(() -> {
try {
updateNodeAgentMetrics();
nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
} catch (Throwable e) {
logger.warning("Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
aclMaintainer.ifPresent(maintainer -> {
int delay = 120;
aclScheduler.scheduleWithFixedDelay(() -> {
if (!isFrozen()) maintainer.converge();
}, 30, delay, TimeUnit.SECONDS);
});
}
@Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
}
} while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
final HashSet<T> result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
@Override public void stopServices() { nodeAgent.stopServices(); }
@Override public void suspend() { nodeAgent.suspend(); }
@Override public void start() { nodeAgent.start(); }
@Override public void stop() { nodeAgent.stop(); }
@Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
@Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
@Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
.collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
nodeAgentContextsByHostname.forEach((hostname, context) ->
nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context)
);
}
private void updateNodeAgentMetrics() {
int numberContainersWaitingImage = 0;
int numberOfNewUnhandledExceptions = 0;
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
}
numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
@Override
@Override
public boolean isFrozen() {
return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ofSeconds(0);
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(nodeAgent -> {
nodeAgent.suspend();
nodeAgent.stopServices();
});
}
@Override
public void start() {
metricsScheduler.scheduleAtFixedRate(() -> {
try {
updateNodeAgentMetrics();
nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
} catch (Throwable e) {
logger.warning("Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
aclMaintainer.ifPresent(maintainer -> {
int delay = 120;
aclScheduler.scheduleWithFixedDelay(() -> {
if (!isFrozen()) maintainer.converge();
}, 30, delay, TimeUnit.SECONDS);
});
}
@Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
}
} while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
final HashSet<T> result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
@Override public void stopServices() { nodeAgent.stopServices(); }
@Override public void suspend() { nodeAgent.suspend(); }
@Override public void start() { nodeAgent.start(); }
@Override public void stop() { nodeAgent.stop(); }
@Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
@Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
@Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} |
Right, I changed it to `parallelStream()` for that reason. Given that the method is lightweight and most of the time will be spent sleeping, I assume that it would be able to start the wait for all of them at (nearly) the same time? | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT)) | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
.collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
nodeAgentContextsByHostname.forEach((hostname, context) ->
nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context)
);
}
private void updateNodeAgentMetrics() {
int numberContainersWaitingImage = 0;
int numberOfNewUnhandledExceptions = 0;
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
}
numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
@Override
@Override
public boolean isFrozen() {
return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ofSeconds(0);
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(nodeAgent -> {
nodeAgent.suspend();
nodeAgent.stopServices();
});
}
@Override
public void start() {
metricsScheduler.scheduleAtFixedRate(() -> {
try {
updateNodeAgentMetrics();
nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
} catch (Throwable e) {
logger.warning("Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
aclMaintainer.ifPresent(maintainer -> {
int delay = 120;
aclScheduler.scheduleWithFixedDelay(() -> {
if (!isFrozen()) maintainer.converge();
}, 30, delay, TimeUnit.SECONDS);
});
}
@Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
}
} while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
final HashSet<T> result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
@Override public void stopServices() { nodeAgent.stopServices(); }
@Override public void suspend() { nodeAgent.suspend(); }
@Override public void start() { nodeAgent.start(); }
@Override public void stop() { nodeAgent.stop(); }
@Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
@Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
@Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
/**
 * Reconciles the set of running node agents with the wanted set: stops agents
 * whose host is gone, starts agents for new hosts, and schedules a tick with a
 * fresh context for every wanted host.
 */
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
    Map<String, NodeAgentContext> wantedContexts = containersToRun.stream()
            .collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));

    // Stop and remove agents for hosts that are no longer wanted.
    for (String hostname : diff(nodeAgentWithSchedulerByHostname.keySet(), wantedContexts.keySet())) {
        nodeAgentWithSchedulerByHostname.remove(hostname).stop();
    }

    // Create and start agents for newly wanted hosts.
    for (String hostname : diff(wantedContexts.keySet(), nodeAgentWithSchedulerByHostname.keySet())) {
        NodeAgentWithScheduler agent = nodeAgentWithSchedulerFactory.create(wantedContexts.get(hostname));
        agent.start();
        nodeAgentWithSchedulerByHostname.put(hostname, agent);
    }

    // Hand every agent its up-to-date context for the next tick.
    wantedContexts.forEach((hostname, context) ->
            nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context));
}
/**
 * Samples how many agents are still downloading their container image and adds
 * the unhandled-exception counts accumulated by the agents since the last call.
 */
private void updateNodeAgentMetrics() {
    int downloading = 0;
    int unhandledExceptions = 0;
    for (NodeAgentWithScheduler agent : nodeAgentWithSchedulerByHostname.values()) {
        if (agent.isDownloadingImage()) {
            downloading++;
        }
        // Side effect: this also resets the agent's exception counter.
        unhandledExceptions += agent.getAndResetNumberOfUnhandledExceptions();
    }
    numberOfContainersInLoadImageState.sample(downloading);
    numberOfUnhandledExceptionsInNodeAgent.add(unhandledExceptions);
}
/** Returns whether this NodeAdmin is currently frozen (see {@code setFrozen}). */
@Override
public boolean isFrozen() {
    return isFrozen;
}
/**
 * Returns how long the current freeze convergence has been in progress, or a
 * zero duration when not converging ({@code startOfFreezeConvergence} is null
 * while unfrozen).
 */
@Override
public Duration subsystemFreezeDuration() {
    Instant start = startOfFreezeConvergence;
    return start == null
            ? Duration.ofSeconds(0)
            : Duration.between(start, clock.instant());
}
/**
 * Suspends the agents of the given hosts and stops the services inside their
 * containers. Hostnames with no known agent are ignored.
 */
@Override
public void stopNodeAgentServices(List<String> hostnames) {
    hostnames.parallelStream()
             // Single get + null filter instead of containsKey-then-get: the map is
             // mutated concurrently (refreshContainersToRun), so the two-step check
             // could observe the entry disappear in between and NPE on the result.
             .map(nodeAgentWithSchedulerByHostname::get)
             .filter(agent -> agent != null)
             .forEach(agent -> {
                 agent.suspend();
                 agent.stopServices();
             });
}
// Starts the periodic background tasks: node-agent metric sampling and,
// when configured, ACL convergence.
@Override
public void start() {
    // Sample and publish agent metrics roughly once a minute.
    metricsScheduler.scheduleAtFixedRate(() -> {
        try {
            updateNodeAgentMetrics();
            nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
        } catch (Throwable e) {
            // Catch Throwable: an escaping exception would silently cancel the periodic task.
            logger.warning("Metric fetcher scheduler failed", e);
        }
    }, 10, 55, TimeUnit.SECONDS);

    // Converge ACLs every ~2 minutes, but only while this NodeAdmin is not frozen.
    aclMaintainer.ifPresent(maintainer -> {
        int delay = 120;
        aclScheduler.scheduleWithFixedDelay(() -> {
            if (!isFrozen()) maintainer.converge();
        }, 30, delay, TimeUnit.SECONDS);
    });
}
// Shuts down both schedulers, stops every node agent, and then blocks until
// both schedulers have fully terminated.
@Override
public void stop() {
    metricsScheduler.shutdown();
    aclScheduler.shutdown();

    // Stop all agents in parallel; each stop may block on the agent's current tick.
    nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);

    do {
        try {
            metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            // NOTE(review): the interrupt flag is deliberately not re-asserted so the
            // loop keeps waiting until both schedulers terminate — confirm intended.
            logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
        }
    } while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
/** Returns the set difference: every element of {@code minuend} not present in {@code subtrahend}. */
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
    final HashSet<T> result = new HashSet<>();
    for (T element : minuend) {
        if (!subtrahend.contains(element)) {
            result.add(element);
        }
    }
    return result;
}
/**
 * Pairs a NodeAgent with its NodeAgentScheduler and forwards each method of
 * either interface to the corresponding delegate.
 */
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
    private final NodeAgent nodeAgent;
    private final NodeAgentScheduler nodeAgentScheduler;

    private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
        this.nodeAgent = nodeAgent;
        this.nodeAgentScheduler = nodeAgentScheduler;
    }

    // NodeAgent delegation
    @Override public void stopServices() { nodeAgent.stopServices(); }
    @Override public void suspend() { nodeAgent.suspend(); }
    @Override public void start() { nodeAgent.start(); }
    @Override public void stop() { nodeAgent.stop(); }
    @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
    @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
    @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }

    // NodeAgentScheduler delegation
    @Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
    @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
/** Factory producing a scheduler-wrapped NodeAgent for a given context. */
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
    NodeAgentWithScheduler create(NodeAgentContext context);
}
/**
 * Wires a new NodeAgent to a NodeAgentContextManager, which doubles as the
 * agent's scheduler in the returned pair.
 */
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
    NodeAgentContextManager manager = new NodeAgentContextManager(clock, context);
    return new NodeAgentWithScheduler(nodeAgentFactory.create(manager), manager);
}
} |
I think so. So the parallelStream will spawn up to 32 threads to accomplish this? Isn't it implementation-defined, and it may spawn fewer, etc.? I mean, if there had been millions of elements it wouldn't have spawned that many threads — there's got to be some limitation. Anyway, parallelStream() is probably fine. | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT)) | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
.collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
nodeAgentContextsByHostname.forEach((hostname, context) ->
nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context)
);
}
private void updateNodeAgentMetrics() {
int numberContainersWaitingImage = 0;
int numberOfNewUnhandledExceptions = 0;
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
}
numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
/** Returns whether this NodeAdmin is currently frozen (see {@code setFrozen}). */
@Override
public boolean isFrozen() {
    return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ofSeconds(0);
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(nodeAgent -> {
nodeAgent.suspend();
nodeAgent.stopServices();
});
}
@Override
public void start() {
metricsScheduler.scheduleAtFixedRate(() -> {
try {
updateNodeAgentMetrics();
nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
} catch (Throwable e) {
logger.warning("Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
aclMaintainer.ifPresent(maintainer -> {
int delay = 120;
aclScheduler.scheduleWithFixedDelay(() -> {
if (!isFrozen()) maintainer.converge();
}, 30, delay, TimeUnit.SECONDS);
});
}
@Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
}
} while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
final HashSet<T> result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
@Override public void stopServices() { nodeAgent.stopServices(); }
@Override public void suspend() { nodeAgent.suspend(); }
@Override public void start() { nodeAgent.start(); }
@Override public void stop() { nodeAgent.stop(); }
@Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
@Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
@Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
.collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
nodeAgentContextsByHostname.forEach((hostname, context) ->
nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context)
);
}
private void updateNodeAgentMetrics() {
int numberContainersWaitingImage = 0;
int numberOfNewUnhandledExceptions = 0;
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
}
numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
/** Returns whether this NodeAdmin is currently frozen (see {@code setFrozen}). */
@Override
public boolean isFrozen() {
    return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ofSeconds(0);
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(nodeAgent -> {
nodeAgent.suspend();
nodeAgent.stopServices();
});
}
@Override
public void start() {
metricsScheduler.scheduleAtFixedRate(() -> {
try {
updateNodeAgentMetrics();
nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
} catch (Throwable e) {
logger.warning("Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
aclMaintainer.ifPresent(maintainer -> {
int delay = 120;
aclScheduler.scheduleWithFixedDelay(() -> {
if (!isFrozen()) maintainer.converge();
}, 30, delay, TimeUnit.SECONDS);
});
}
@Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
}
} while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
final HashSet<T> result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
@Override public void stopServices() { nodeAgent.stopServices(); }
@Override public void suspend() { nodeAgent.suspend(); }
@Override public void start() { nodeAgent.start(); }
@Override public void stop() { nodeAgent.stop(); }
@Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
@Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
@Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} |
My assumption is that the method either completes immediately or is put to sleep immediately, and when either of those happens, that thread will start executing the next `setFrozen()`? So even if there are not enough threads for all `NodeAgent`s, they shouldn't have to wait long. | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT)) | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
.collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
nodeAgentContextsByHostname.forEach((hostname, context) ->
nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context)
);
}
private void updateNodeAgentMetrics() {
int numberContainersWaitingImage = 0;
int numberOfNewUnhandledExceptions = 0;
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
}
numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
/** Returns whether this NodeAdmin is currently frozen (see {@code setFrozen}). */
@Override
public boolean isFrozen() {
    return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ofSeconds(0);
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(nodeAgent -> {
nodeAgent.suspend();
nodeAgent.stopServices();
});
}
@Override
public void start() {
metricsScheduler.scheduleAtFixedRate(() -> {
try {
updateNodeAgentMetrics();
nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
} catch (Throwable e) {
logger.warning("Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
aclMaintainer.ifPresent(maintainer -> {
int delay = 120;
aclScheduler.scheduleWithFixedDelay(() -> {
if (!isFrozen()) maintainer.converge();
}, 30, delay, TimeUnit.SECONDS);
});
}
@Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
}
} while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
final HashSet<T> result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
@Override public void stopServices() { nodeAgent.stopServices(); }
@Override public void suspend() { nodeAgent.suspend(); }
@Override public void start() { nodeAgent.start(); }
@Override public void stop() { nodeAgent.stop(); }
@Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
@Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
@Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
.collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
nodeAgentContextsByHostname.forEach((hostname, context) ->
nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context)
);
}
private void updateNodeAgentMetrics() {
int numberContainersWaitingImage = 0;
int numberOfNewUnhandledExceptions = 0;
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
}
numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
@Override
@Override
public boolean isFrozen() {
return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ofSeconds(0);
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(nodeAgent -> {
nodeAgent.suspend();
nodeAgent.stopServices();
});
}
@Override
public void start() {
metricsScheduler.scheduleAtFixedRate(() -> {
try {
updateNodeAgentMetrics();
nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
} catch (Throwable e) {
logger.warning("Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
aclMaintainer.ifPresent(maintainer -> {
int delay = 120;
aclScheduler.scheduleWithFixedDelay(() -> {
if (!isFrozen()) maintainer.converge();
}, 30, delay, TimeUnit.SECONDS);
});
}
@Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
}
} while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
final HashSet<T> result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
@Override public void stopServices() { nodeAgent.stopServices(); }
@Override public void suspend() { nodeAgent.suspend(); }
@Override public void start() { nodeAgent.start(); }
@Override public void stop() { nodeAgent.stop(); }
@Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
@Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
@Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} |
A thread put to sleep cannot start executing the next setFrozen. (Well, they can with 'fibers', which I presume Java doesn't implement yet (project Loom)) | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT)) | public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, NODE_AGENT_FREEZE_TIMEOUT))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
.collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
nodeAgentContextsByHostname.forEach((hostname, context) ->
nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context)
);
}
private void updateNodeAgentMetrics() {
int numberContainersWaitingImage = 0;
int numberOfNewUnhandledExceptions = 0;
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
}
numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
@Override
@Override
public boolean isFrozen() {
return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ofSeconds(0);
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(nodeAgent -> {
nodeAgent.suspend();
nodeAgent.stopServices();
});
}
@Override
public void start() {
metricsScheduler.scheduleAtFixedRate(() -> {
try {
updateNodeAgentMetrics();
nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
} catch (Throwable e) {
logger.warning("Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
aclMaintainer.ifPresent(maintainer -> {
int delay = 120;
aclScheduler.scheduleWithFixedDelay(() -> {
if (!isFrozen()) maintainer.converge();
}, 30, delay, TimeUnit.SECONDS);
});
}
@Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
}
} while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
final HashSet<T> result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
@Override public void stopServices() { nodeAgent.stopServices(); }
@Override public void suspend() { nodeAgent.suspend(); }
@Override public void start() { nodeAgent.start(); }
@Override public void stop() { nodeAgent.stop(); }
@Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
@Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
@Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} | class NodeAdminImpl implements NodeAdmin {
private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private final ScheduledExecutorService aclScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final Optional<AclMaintainer> aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
nodeAgentContextFactory, aclMaintainer, metricReceiver, clock);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
NodeAgentContextFactory nodeAgentContextFactory,
Optional<AclMaintainer> aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
}
@Override
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
.collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
nodeAgentContextsByHostname.forEach((hostname, context) ->
nodeAgentWithSchedulerByHostname.get(hostname).scheduleTickWith(context)
);
}
private void updateNodeAgentMetrics() {
int numberContainersWaitingImage = 0;
int numberOfNewUnhandledExceptions = 0;
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
}
numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
@Override
@Override
public boolean isFrozen() {
return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ofSeconds(0);
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(nodeAgent -> {
nodeAgent.suspend();
nodeAgent.stopServices();
});
}
@Override
public void start() {
metricsScheduler.scheduleAtFixedRate(() -> {
try {
updateNodeAgentMetrics();
nodeAgentWithSchedulerByHostname.values().forEach(NodeAgent::updateContainerNodeMetrics);
} catch (Throwable e) {
logger.warning("Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
aclMaintainer.ifPresent(maintainer -> {
int delay = 120;
aclScheduler.scheduleWithFixedDelay(() -> {
if (!isFrozen()) maintainer.converge();
}, 30, delay, TimeUnit.SECONDS);
});
}
@Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
}
} while (!metricsScheduler.isTerminated() || !aclScheduler.isTerminated());
}
private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
final HashSet<T> result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
@Override public void stopServices() { nodeAgent.stopServices(); }
@Override public void suspend() { nodeAgent.suspend(); }
@Override public void start() { nodeAgent.start(); }
@Override public void stop() { nodeAgent.stop(); }
@Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
@Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
@Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context) { nodeAgentScheduler.scheduleTickWith(context); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} |
Seems weird that the LoadbalancerResponse gets the nodeRepository... Looks as if it really needs the applicationId and a list of loadbalancers | private HttpResponse handleGET(HttpRequest request) {
String path = request.getUri().getPath();
if (path.equals("/loadbalancers/v1/")) return new LoadBalancersResponse(request, nodeRepository);
throw new NotFoundException("Nothing at path '" + path + "'");
} | if (path.equals("/loadbalancers/v1/")) return new LoadBalancersResponse(request, nodeRepository); | private HttpResponse handleGET(HttpRequest request) {
String path = request.getUri().getPath();
if (path.equals("/loadbalancers/v1/")) return new LoadBalancersResponse(request, nodeRepository);
throw new NotFoundException("Nothing at path '" + path + "'");
} | class LoadBalancersApiHandler extends LoggingRequestHandler {
private final NodeRepository nodeRepository;
@Inject
public LoadBalancersApiHandler(LoggingRequestHandler.Context parentCtx, NodeRepository nodeRepository) {
super(parentCtx);
this.nodeRepository = nodeRepository;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (NotFoundException | NoSuchNodeException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
} | class LoadBalancersApiHandler extends LoggingRequestHandler {
private final NodeRepository nodeRepository;
@Inject
public LoadBalancersApiHandler(LoggingRequestHandler.Context parentCtx, NodeRepository nodeRepository) {
super(parentCtx);
this.nodeRepository = nodeRepository;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (NotFoundException | NoSuchNodeException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
} |
It's because the method to call, `readLoadBalancers()` or `readLoadBalancers(ApplicationId)`, depends on a request parameter. It could take a list of all load balancers and an application ID, but then I need to reimplement filtering load balancers by application in this class. Alternatively I could introduce a `LoadBalancerList` that wraps `List<LoadBalancer>` and migrate callers to that, but since there's only one filter condition (application) I currently don't see the need. | private HttpResponse handleGET(HttpRequest request) {
String path = request.getUri().getPath();
if (path.equals("/loadbalancers/v1/")) return new LoadBalancersResponse(request, nodeRepository);
throw new NotFoundException("Nothing at path '" + path + "'");
} | if (path.equals("/loadbalancers/v1/")) return new LoadBalancersResponse(request, nodeRepository); | private HttpResponse handleGET(HttpRequest request) {
String path = request.getUri().getPath();
if (path.equals("/loadbalancers/v1/")) return new LoadBalancersResponse(request, nodeRepository);
throw new NotFoundException("Nothing at path '" + path + "'");
} | class LoadBalancersApiHandler extends LoggingRequestHandler {
private final NodeRepository nodeRepository;
@Inject
public LoadBalancersApiHandler(LoggingRequestHandler.Context parentCtx, NodeRepository nodeRepository) {
super(parentCtx);
this.nodeRepository = nodeRepository;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (NotFoundException | NoSuchNodeException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
} | class LoadBalancersApiHandler extends LoggingRequestHandler {
private final NodeRepository nodeRepository;
@Inject
public LoadBalancersApiHandler(LoggingRequestHandler.Context parentCtx, NodeRepository nodeRepository) {
super(parentCtx);
this.nodeRepository = nodeRepository;
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (NotFoundException | NoSuchNodeException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
} |
Given these contains double fields - are they necessarily identical? One is fetched from Docker daemon, the other from NodeSpec (sending doubles through Docker daemon and back - is that identity or may skew the doubles?). Do a loosened equality check? | private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
double cpuCap = context.node().getOwner()
.map(NodeSpec.Owner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
.value();
ContainerResources wantedContainerResources = ContainerResources.from(
cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
if (wantedContainerResources.equals(existingContainer.resources)) return;
context.log(logger, "Container should be running with different resource allocation, wanted: %s, current: %s",
wantedContainerResources, existingContainer.resources);
if (context.node().getState() == Node.State.active) {
orchestratorSuspendNode(context);
}
dockerOperations.updateContainer(context, wantedContainerResources);
} | if (wantedContainerResources.equals(existingContainer.resources)) return; | private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
double cpuCap = context.node().getOwner()
.map(NodeSpec.Owner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
.value();
ContainerResources wantedContainerResources = ContainerResources.from(
cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
if (wantedContainerResources.equals(existingContainer.resources)) return;
context.log(logger, "Container should be running with different resource allocation, wanted: %s, current: %s",
wantedContainerResources, existingContainer.resources);
orchestratorSuspendNode(context);
dockerOperations.updateContainer(context, wantedContainerResources);
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private final DoubleFlag containerCpuCap;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final NodeAgentContextSupplier contextSupplier,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final FlagSource flagSource,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.contextSupplier = contextSupplier;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
.with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
this.loopThread = new Thread(() -> {
while (!terminated.get()) {
try {
NodeAgentContext context = contextSupplier.nextContext();
converge(context);
} catch (InterruptedException ignored) { }
}
});
this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
this.serviceRestarter = service -> {
NodeAgentContext context = contextSupplier.currentContext();
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
@Override
public void start() {
loopThread.start();
}
@Override
public void stop() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
filebeatRestarter.shutdown();
contextSupplier.interrupt();
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException ignored) { }
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
contextSupplier.currentContext().log(logger, "Stopped");
}
/** Starts services inside the container unless they have already been started. */
void startServicesIfNeeded(NodeAgentContext context) {
    // No-op once services have been started for the current container.
    if (hasStartedServices) return;

    context.log(logger, "Starting services");
    dockerOperations.startServices(context);
    hasStartedServices = true;
}
// Runs the node's optional resume command once after services are (re)started, and
// makes sure the daily filebeat restart task is scheduled with fresh metrics config.
void resumeNodeIfNeeded(NodeAgentContext context) {
if (!hasResumedNode) {
if (!currentFilebeatRestarter.isPresent()) {
// (Re)write metrics config before scheduling the periodic filebeat restart.
storageMaintainer.writeMetricsConfig(context);
currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
}
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
// Compares the node-repo's view of this node (restart/reboot generation, docker image)
// against the agent's local view, and publishes new attributes if anything differs.
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
// Restart generation is only reported when the node has a wanted restart generation
// and our local value differs from what the repo currently has.
if (context.node().getWantedRestartGeneration().isPresent() &&
!Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
// Report the wanted image as current only while the container is up (state UNKNOWN).
Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
currentNodeAttributes.withDockerImage(context.node().getCurrentDockerImage().orElse(new DockerImage("")));
newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
}
publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
// Pushes newAttributes to the node repo, but only when they differ from currentAttributes.
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    // Skip the node-repo round trip entirely when nothing has changed.
    if (currentAttributes.equals(newAttributes)) return;

    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
// Creates and starts the docker container for this node and resets per-container
// bookkeeping (CPU metrics baseline, service/resume flags).
private void startContainer(NodeAgentContext context) {
ContainerData containerData = createContainerData(context);
dockerOperations.createContainer(context, containerData);
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
// NOTE(review): containerState is not modified here; the logged value is whatever the
// caller set before invoking this method (STARTING in doConverge) — confirm intent.
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
// Removes the container if shouldRemoveContainer() finds a reason; otherwise restarts
// its services when the wanted restart generation has been bumped. Returns the container
// if it is still present afterwards, empty if it was removed (or was already absent).
private Optional<Container> removeContainerIfNeededUpdateContainerState(
NodeAgentContext context, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(context, container))
.map(container -> {
shouldRestartServices(context.node()).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(context, container);
// Record that we have now satisfied the wanted restart generation.
currentRestartGeneration = context.node().getWantedRestartGeneration();
});
return container;
});
}
// Returns the reason services should be restarted (wanted restart generation is ahead
// of our current one), or empty when no restart is needed.
// NOTE(review): currentRestartGeneration.get() assumes the field is present whenever the
// node has a wanted restart generation — confirm that invariant (doConverge syncs it).
private Optional<String> shouldRestartServices(NodeSpec node) {
if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
}
return Optional.empty();
}
// Restarts Vespa inside the container, after getting orchestrator permission to suspend.
// Only applies to a running container on a node in the active state.
private void restartServices(NodeAgentContext context, Container existingContainer) {
    boolean containerRunning = existingContainer.state.isRunning();
    boolean nodeActive = context.node().getState() == Node.State.active;
    if (!containerRunning || !nodeActive) return;

    context.log(logger, "Restarting services");
    orchestratorSuspendNode(context);
    dockerOperations.restartVespa(context);
}
/** Stops Vespa services inside the container, if the container exists. */
@Override
public void stopServices() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
// Services are down, so both flags must be re-armed for the next start/resume.
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
// The container disappeared underneath us; remember that.
containerState = ABSENT;
}
}
/** Suspends Vespa services inside the container, if the container exists. Best effort. */
@Override
public void suspend() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
// Suspend failure is non-fatal; log and carry on.
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
// Returns the reason the existing container must be removed, or empty if it may be kept.
// Checks run in order; the first matching reason wins.
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
    Node.State nodeState = node.getState();
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned)
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");

    Optional<DockerImage> wantedImage = node.getWantedDockerImage();
    if (wantedImage.isPresent() && !wantedImage.get().equals(existingContainer.image))
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer.image.asString() + " -> " + wantedImage.get().asString());

    if (!existingContainer.state.isRunning())
        return Optional.of("Container no longer running");

    if (currentRebootGeneration < node.getWantedRebootGeneration())
        return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, node.getWantedRebootGeneration()));

    // STARTING here means the previous start attempt never completed.
    if (containerState == STARTING)
        return Optional.of("Container failed to start");

    return Optional.empty();
}
// If the container should be removed: suspends it in the orchestrator (active nodes only),
// stops its services, cancels the filebeat task, harvests core dumps, removes the container
// and marks state ABSENT. Returns the container when it was kept, empty when removed.
private Optional<Container> removeContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(context.node(), existingContainer);
if (removeReason.isPresent()) {
context.log(logger, "Will remove container: " + removeReason.get());
if (existingContainer.state.isRunning()) {
// Only ask the orchestrator for permission while the node is still active.
if (context.node().getState() == Node.State.active) {
orchestratorSuspendNode(context);
}
try {
if (context.node().getState() != Node.State.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
// Best effort: removal proceeds even if graceful shutdown fails.
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
stopFilebeatSchedulerIfNeeded();
storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
// Removing (and later recreating) the container satisfies the wanted reboot generation.
currentRebootGeneration = context.node().getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
// Kicks off an async pull of the wanted docker image when it differs from the current
// one, and tracks the in-flight download in imageBeingDownloaded.
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    // Already converged on the wanted image (or neither set): nothing to pull.
    if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

    DockerImage wantedImage = node.getWantedDockerImage().get();
    boolean pullInProgress = dockerOperations.pullImageAsyncIfNeeded(wantedImage);
    // Remember the image while the pull is in flight so converge() can wait for it;
    // clear the marker as soon as no pull is needed.
    imageBeingDownloaded = pullInProgress ? wantedImage : null;
}
// Runs one converge iteration, translating every failure into logging/bookkeeping so the
// converge loop thread itself never dies.
public void converge(NodeAgentContext context) {
try {
doConverge(context);
} catch (OrchestratorException | ConvergenceException e) {
// Expected transient conditions: log the message only, no stack trace.
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
// Counted so monitoring can pick up repeated docker failures.
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Throwable e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
}
}
// Drives the node toward its wanted state: syncs local generation counters with a changed
// NodeSpec, then acts on the node's lifecycle state (remove/start container, run services,
// update the node repo, resume/suspend in the orchestrator). Throws on failures that
// converge() is expected to translate.
void doConverge(NodeAgentContext context) {
NodeSpec node = context.node();
Optional<Container> container = getContainer(context);
if (!node.equals(lastNode)) {
logChangesToNodeSpec(context, lastNode, node);
// Fast-forward local reboot/restart counters when the node repo has moved ahead of us.
if (currentRebootGeneration < node.getCurrentRebootGeneration())
currentRebootGeneration = node.getCurrentRebootGeneration();
if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
currentRestartGeneration = node.getCurrentRestartGeneration();
// Spec changed while the container is running: metrics config may need rewriting.
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
// No container should run in these states.
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, container);
// Clean old files when disk utilization reaches 80% of the allocation.
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node);
// Defer the rest of convergence until the wanted image has been pulled.
if (isDownloadingImage()) {
context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(context, container);
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
if (! container.isPresent()) {
// STARTING marks the window where a failed start leaves an unusable container.
containerState = STARTING;
startContainer(context);
containerState = UNKNOWN;
aclMaintainer.ifPresent(AclMaintainer::converge);
} else {
updateContainerIfNeeded(context, container.get());
}
startServicesIfNeeded(context);
resumeNodeIfNeeded(context);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
// Everything is up; report state and let the orchestrator resume the node.
updateNodeRepoWithCurrentAttributes(context);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context);
break;
case provisioned:
nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
break;
case dirty:
removeContainerIfNeededUpdateContainerState(context, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(context);
nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
}
// Logs a human-readable diff of the tracked NodeSpec fields; silent when nothing changed.
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    StringBuilder diff = new StringBuilder();
    appendIfDifferent(diff, "state", lastNode, node, NodeSpec::getState);
    if (diff.length() == 0) return;
    context.log(logger, LogLevel.INFO, "Changes to node: " + diff.toString());
}
// Renders null as "[absent]" so diffs read naturally when a field appears or disappears.
private static <T> String fieldDescription(T value) {
    if (value == null) return "[absent]";
    return value.toString();
}
// Appends "name old -> new" to builder when the field extracted by getter differs between
// the two specs; oldNode may be null on the first tick.
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T previous = oldNode == null ? null : getter.apply(oldNode);
    T current = getter.apply(newNode);
    if (Objects.equals(previous, current)) return;

    // Separate multiple diffs with ", ".
    if (builder.length() > 0) {
        builder.append(", ");
    }
    builder.append(name).append(" ").append(fieldDescription(previous)).append(" -> ").append(fieldDescription(current));
}
// Cancels the scheduled daily filebeat-restart task, if one is active, and forgets it.
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
if (containerState != UNKNOWN) return;
final NodeAgentContext context = contextSupplier.currentContext();
final NodeSpec node = context.node();
Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
.add("state", node.getState().toString());
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.getAllowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("mem_total.used", memoryTotalBytesUsage)
.withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(context, metrics);
}
// Serializes the metrics as secret-agent reports and injects them into the container
// via vespa-rpc-invoke (5 second exec timeout). Failures are logged, not propagated.
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
String wrappedMetrics = "s:" + params.toString();
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
}
}
// Returns the node's container from the docker daemon, caching definite absence: once a
// container is known ABSENT it cannot reappear without this agent starting it.
private Optional<Container> getContainer(NodeAgentContext context) {
    if (containerState == ABSENT) return Optional.empty();

    Optional<Container> container = dockerOperations.getContainer(context);
    // Docker says it is gone — remember that so subsequent ticks short-circuit.
    if (!container.isPresent()) {
        containerState = ABSENT;
    }
    return container;
}
/** Returns true while a wanted docker image is still being pulled. */
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
// Returns the unhandled-exception tally and resets it, so each caller observes only the
// exceptions that occurred since the previous call.
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int unhandled = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandled;
}
// Tracks cumulative CPU counters between samples and exposes per-interval usage ratios.
// NOTE(review): not synchronized — assumed to be accessed from a single thread; confirm.
class CpuUsageReporter {
private long containerKernelUsage = 0;
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private long deltaContainerKernelUsage;
private long deltaContainerUsage;
private long deltaSystemUsage;
// Records a new sample of cumulative counters and computes deltas vs the previous one.
private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
// First sample has no baseline: force a zero system delta so the ratios report NaN.
deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
this.totalSystemUsage = totalSystemUsage;
this.totalContainerUsage = totalContainerUsage;
this.containerKernelUsage = containerKernelUsage;
}
/**
 * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
 * in the time between the last two times updateCpuDeltas() was called. This is calculated
 * by dividing the CPU time used by the container with the CPU time used by the entire system.
 */
double getCpuUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
}
// Same as getCpuUsageRatio(), but counting only the container's kernel-mode CPU time.
double getCpuKernelUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
}
}
// Asks the orchestrator for permission to suspend this node; presumably throws
// (converge() catches OrchestratorException) when permission is denied — confirm.
private void orchestratorSuspendNode(NodeAgentContext context) {
context.log(logger, "Ask Orchestrator for permission to suspend node");
orchestrator.suspend(context.hostname().value());
}
// Default ContainerData rejects all file additions; protected so it looks like an
// override point for providing files to new containers — TODO confirm against subclasses.
protected ContainerData createContainerData(NodeAgentContext context) {
return (pathInContainer, data) -> {
throw new UnsupportedOperationException("addFile not implemented");
};
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
// Set once by stop(); read by the converge loop to know when to exit.
private final AtomicBoolean terminated = new AtomicBoolean(false);
// True once the node's resume command has been run for the current container.
private boolean hasResumedNode = false;
// True once services have been started inside the current container.
private boolean hasStartedServices = true;
private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private final DoubleFlag containerCpuCap;
// Tally read-and-reset via getAndResetNumberOfUnhandledExceptions().
private int numberOfUnhandledException = 0;
// Non-null while an async docker image pull is in flight.
private DockerImage imageBeingDownloaded = null;
// Local view of the reboot/restart generations this agent has satisfied.
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
// The converge ("tick") loop thread; started by start(), joined by stop().
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
// Handle to the scheduled daily filebeat restart, so it can be cancelled.
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
// Last NodeSpec seen, used to detect and log spec changes between ticks.
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Creates a node agent for a single node. Wires collaborators, binds the container CPU
 * cap flag to this node's hostname, and prepares (but does not start) the converge loop
 * thread and the filebeat service-restart callback.
 */
public NodeAgentImpl(
final NodeAgentContextSupplier contextSupplier,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final FlagSource flagSource,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.contextSupplier = contextSupplier;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
.with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
// The converge loop: blocks for the next context, then converges, until stop() is called.
this.loopThread = new Thread(() -> {
while (!terminated.get()) {
try {
NodeAgentContext context = contextSupplier.nextContext();
converge(context);
} catch (InterruptedException ignored) { }
}
});
this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
// Restarts a named service inside the container; failures are logged, never thrown.
this.serviceRestarter = service -> {
NodeAgentContext context = contextSupplier.currentContext();
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
/** Starts the converge loop on the dedicated tick thread created in the constructor. */
@Override
public void start() {
loopThread.start();
}
/** Stops this agent and blocks until the converge loop and scheduler have terminated. */
@Override
public void stop() {
// Flip the terminated flag exactly once; a second stop() is a programming error.
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
filebeatRestarter.shutdown();
contextSupplier.interrupt();
// Retry until both are fully down, even if this thread is interrupted while waiting.
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException ignored) { }
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
contextSupplier.currentContext().log(logger, "Stopped");
}
// Starts services inside the container unless they have already been started.
void startServicesIfNeeded(NodeAgentContext context) {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
// Runs the node's optional resume command once after services are (re)started, and
// makes sure the daily filebeat restart task is scheduled with fresh metrics config.
void resumeNodeIfNeeded(NodeAgentContext context) {
if (!hasResumedNode) {
if (!currentFilebeatRestarter.isPresent()) {
storageMaintainer.writeMetricsConfig(context);
currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
}
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
// Compares the node-repo's view of this node (restart/reboot generation, docker image)
// against the agent's local view, and publishes new attributes if anything differs.
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
if (context.node().getWantedRestartGeneration().isPresent() &&
!Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
// Report the wanted image as current only while the container is up (state UNKNOWN).
Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
currentNodeAttributes.withDockerImage(context.node().getCurrentDockerImage().orElse(new DockerImage("")));
newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
}
publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
// Pushes newAttributes to the node repo, but only when they differ from currentAttributes.
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
if (!currentAttributes.equals(newAttributes)) {
context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
currentAttributes, newAttributes);
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
}
// Creates and starts the docker container for this node and resets per-container
// bookkeeping (CPU metrics baseline, service/resume flags).
private void startContainer(NodeAgentContext context) {
ContainerData containerData = createContainerData(context);
dockerOperations.createContainer(context, containerData);
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
// NOTE(review): containerState is not modified here; the logged value is whatever the
// caller set before invoking this method — confirm intent.
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
// Removes the container if shouldRemoveContainer() finds a reason; otherwise restarts
// its services when the wanted restart generation has been bumped.
private Optional<Container> removeContainerIfNeededUpdateContainerState(
NodeAgentContext context, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(context, container))
.map(container -> {
shouldRestartServices(context.node()).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(context, container);
currentRestartGeneration = context.node().getWantedRestartGeneration();
});
return container;
});
}
// Returns the reason services should be restarted, or empty when no restart is needed.
// NOTE(review): currentRestartGeneration.get() assumes presence whenever the node has a
// wanted restart generation — confirm that invariant.
private Optional<String> shouldRestartServices(NodeSpec node) {
if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
}
return Optional.empty();
}
// Restarts Vespa inside a running container on an active node, after orchestrator suspend.
private void restartServices(NodeAgentContext context, Container existingContainer) {
if (existingContainer.state.isRunning() && context.node().getState() == Node.State.active) {
context.log(logger, "Restarting services");
orchestratorSuspendNode(context);
dockerOperations.restartVespa(context);
}
}
/** Stops Vespa services inside the container, if the container exists. */
@Override
public void stopServices() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
// Services are down, so both flags must be re-armed for the next start/resume.
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
/** Suspends Vespa services inside the container, if the container exists. Best effort. */
@Override
public void suspend() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
// Suspend failure is non-fatal; log and carry on.
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
// Returns the reason the existing container must be removed, or empty if it may be kept.
// Checks run in order; the first matching reason wins.
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final Node.State nodeState = node.getState();
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
// STARTING here means the previous start attempt never completed.
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
// If the container should be removed: suspends the node in the orchestrator, stops its
// services, cancels the filebeat task, harvests core dumps, removes the container and
// marks state ABSENT. Returns the container when kept, empty when removed.
// NOTE(review): unlike the other variant of this method earlier in this file, the
// orchestrator suspend here is NOT guarded on the node being active — confirm intended.
private Optional<Container> removeContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(context.node(), existingContainer);
if (removeReason.isPresent()) {
context.log(logger, "Will remove container: " + removeReason.get());
if (existingContainer.state.isRunning()) {
orchestratorSuspendNode(context);
try {
if (context.node().getState() != Node.State.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
// Best effort: removal proceeds even if graceful shutdown fails.
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
stopFilebeatSchedulerIfNeeded();
storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
// Removing (and later recreating) the container satisfies the wanted reboot generation.
currentRebootGeneration = context.node().getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
// Kicks off an async pull of the wanted docker image when it differs from the current
// one, tracking the in-flight download in imageBeingDownloaded.
private void scheduleDownLoadIfNeeded(NodeSpec node) {
if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
imageBeingDownloaded = node.getWantedDockerImage().get();
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
// Runs one converge iteration, translating every failure into logging/bookkeeping so the
// converge loop thread itself never dies.
public void converge(NodeAgentContext context) {
try {
doConverge(context);
} catch (OrchestratorException | ConvergenceException e) {
// Expected transient conditions: log the message only, no stack trace.
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Throwable e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
}
}
/**
 * Single convergence step: syncs locally tracked generations with the node repo spec,
 * then acts on the node's state (remove/create/update container, start services,
 * report attributes, resume in Orchestrator). May throw; {@link #converge} handles that.
 */
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);
        // Generations only move forward locally, never backwards.
        if (currentRebootGeneration < node.getCurrentRebootGeneration())
            currentRebootGeneration = node.getCurrentRebootGeneration();
        if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
                currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
            currentRestartGeneration = node.getCurrentRestartGeneration();
        // Refresh metrics config on spec change while the container is running.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context);
        }
        lastNode = node;
    }
    switch (node.getState()) {
        // Non-active states where a container should not run: tear down and report.
        case ready:
        case reserved:
        case parked:
        case failed:
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, container);
            // Trigger cleanup when disk utilization reaches 80% of the allocation.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
            scheduleDownLoadIfNeeded(node);
            // Defer the rest of convergence until the wanted image has been pulled.
            if (isDownloadingImage()) {
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(context, container);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            if (! container.isPresent()) {
                // STARTING marks the window where a failed start leaves an unknown container.
                containerState = STARTING;
                startContainer(context);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            } else {
                updateContainerIfNeeded(context, container.get());
            }
            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));
            // Report attributes before resuming so the node repo sees the converged state first.
            updateNodeRepoWithCurrentAttributes(context);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case provisioned:
            // Provisioned nodes are immediately moved to dirty for cleanup on the next tick.
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
/** Logs one INFO line describing spec fields that changed between the two node specs, if any. */
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    StringBuilder diff = new StringBuilder();
    appendIfDifferent(diff, "state", lastNode, node, NodeSpec::getState);
    if (diff.length() == 0) return;  // nothing changed, stay silent

    context.log(logger, LogLevel.INFO, "Changes to node: " + diff.toString());
}
/** Renders {@code null} as a human-readable placeholder, otherwise defers to {@code toString()}. */
private static <T> String fieldDescription(T value) {
    return Objects.toString(value, "[absent]");
}
/**
 * Appends {@code "name old -> new"} to the builder when the field extracted by
 * {@code getter} differs between the two specs; a comma separates multiple entries.
 * A null {@code oldNode} is treated as an absent old value.
 */
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T previous = oldNode == null ? null : getter.apply(oldNode);
    T current = getter.apply(newNode);
    if (Objects.equals(previous, current)) return;

    if (builder.length() > 0) {
        builder.append(", ");
    }
    builder.append(name)
           .append(" ")
           .append(fieldDescription(previous))
           .append(" -> ")
           .append(fieldDescription(current));
}
/** Cancels the scheduled filebeat restarter task, if one is active, and forgets its handle. */
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
/**
 * Samples docker container stats and publishes CPU/memory/disk/network metrics,
 * dimensioned by host/role/state, into the container via secret-agent.
 * No-op unless the container is in the UNKNOWN (i.e. presumed running) state.
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    if (containerState != UNKNOWN) return;
    final NodeAgentContext context = contextSupplier.currentContext();
    final NodeSpec node = context.node();
    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();
    ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // Raw docker stats arrive as loosely typed maps; extract the counters we need.
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
    // CPU ratios are computed from the delta since the previous sample.
    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
    // Scale system-wide CPU ratio up to the share of cores allocated to this node.
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
    // "used" excludes the page cache; "total used" includes it.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("mem_total.used", memoryTotalBytesUsage)
            .withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());
    // One metric set per network interface, with the interface name as an extra dimension.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });
    pushMetricsToContainer(context, metrics);
}
/**
 * Serializes the metrics to secret-agent report format and injects them into the
 * container over vespa-rpc. Failures are logged and swallowed (best effort).
 */
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
    StringBuilder report = new StringBuilder();
    try {
        for (DimensionMetrics metric : metrics) {
            report.append(metric.toSecretAgentReport());
        }
        // The receiving RPC method expects the payload prefixed with "s:".
        String wrappedMetrics = "s:" + report.toString();
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
/**
 * Fetches the container from docker, short-circuiting when we already know it is absent
 * (an absent container cannot reappear without this agent starting it).
 * Records ABSENT when docker reports no container.
 */
private Optional<Container> getContainer(NodeAgentContext context) {
    if (containerState == ABSENT) return Optional.empty();

    Optional<Container> container = dockerOperations.getContainer(context);
    if (!container.isPresent()) {
        containerState = ABSENT;
    }
    return container;
}
@Override
public boolean isDownloadingImage() {
    // Non-null exactly while an async image pull started by this agent is in flight.
    return imageBeingDownloaded != null;
}
/**
 * Returns the number of unhandled exceptions seen since the last call and resets the counter.
 * NOTE(review): read-then-reset is not atomic; presumably only called from one thread — confirm.
 */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int unhandled = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandled;
}
/**
 * Tracks cumulative CPU counters from docker stats and exposes usage ratios computed
 * over the interval between the last two {@link #updateCpuDeltas} calls.
 */
class CpuUsageReporter {
    // Last observed cumulative counters (ticks), used as the baseline for the next delta.
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;
    // Deltas between the two most recent samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;
    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no baseline: force the system delta to 0 so ratios report NaN.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }
    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }
    // Same as getCpuUsageRatio(), but counting only CPU time the container spent in kernel mode.
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/** Asks the Orchestrator for permission to suspend this node; no-op unless the node is active. */
private void orchestratorSuspendNode(NodeAgentContext context) {
    if (context.node().getState() == Node.State.active) {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }
}
/**
 * Returns the ContainerData used when creating the container. This default
 * implementation rejects all file additions; presumably subclasses override it
 * to provision files into the container — protected visibility suggests so, confirm.
 */
protected ContainerData createContainerData(NodeAgentContext context) {
    return (pathInContainer, data) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} |
Added. | private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
double cpuCap = context.node().getOwner()
.map(NodeSpec.Owner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
.value();
ContainerResources wantedContainerResources = ContainerResources.from(
cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
if (wantedContainerResources.equals(existingContainer.resources)) return;
context.log(logger, "Container should be running with different resource allocation, wanted: %s, current: %s",
wantedContainerResources, existingContainer.resources);
if (context.node().getState() == Node.State.active) {
orchestratorSuspendNode(context);
}
dockerOperations.updateContainer(context, wantedContainerResources);
} | if (wantedContainerResources.equals(existingContainer.resources)) return; | private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
double cpuCap = context.node().getOwner()
.map(NodeSpec.Owner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
.value();
ContainerResources wantedContainerResources = ContainerResources.from(
cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
if (wantedContainerResources.equals(existingContainer.resources)) return;
context.log(logger, "Container should be running with different resource allocation, wanted: %s, current: %s",
wantedContainerResources, existingContainer.resources);
orchestratorSuspendNode(context);
dockerOperations.updateContainer(context, wantedContainerResources);
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private final DoubleFlag containerCpuCap;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Wires the agent's collaborators, binds the CPU-cap flag to this host, and prepares
 * (without starting) the tick loop thread and the filebeat service restarter.
 */
public NodeAgentImpl(
        final NodeAgentContextSupplier contextSupplier,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final StorageMaintainer storageMaintainer,
        final FlagSource flagSource,
        final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
        final Optional<AclMaintainer> aclMaintainer,
        final Optional<HealthChecker> healthChecker) {
    this.contextSupplier = contextSupplier;
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
    this.aclMaintainer = aclMaintainer;
    this.healthChecker = healthChecker;
    // Resolve the CPU cap flag per hostname so it can be tuned per node.
    this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
            .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
    // The tick loop: block for the next context, converge, repeat until stop() flips 'terminated'.
    this.loopThread = new Thread(() -> {
        while (!terminated.get()) {
            try {
                NodeAgentContext context = contextSupplier.nextContext();
                converge(context);
            } catch (InterruptedException ignored) { }
        }
    });
    this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
    // Restarts a named service inside the container; failures are logged, never thrown.
    this.serviceRestarter = service -> {
        NodeAgentContext context = contextSupplier.currentContext();
        try {
            ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                    context, "service", service, "restart");
            if (!processResult.isSuccess()) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
            }
        } catch (Exception e) {
            context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
        }
    };
}
@Override
public void start() {
    // Begin the tick loop prepared in the constructor.
    loopThread.start();
}
/**
 * Stops the agent: terminates the tick loop and the filebeat restarter, then blocks
 * until both have fully shut down. Throws if called more than once.
 */
@Override
public void stop() {
    // compareAndSet guarantees only the first caller proceeds.
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    filebeatRestarter.shutdown();
    // Unblock the tick loop if it is waiting for the next context.
    contextSupplier.interrupt();
    do {
        try {
            loopThread.join();
            filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException ignored) { }
        // Loop until both are provably down, even if our own thread was interrupted while waiting.
    } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
    contextSupplier.currentContext().log(logger, "Stopped");
}
/** Starts services in the container once; idempotent until services are stopped again. */
void startServicesIfNeeded(NodeAgentContext context) {
    if (hasStartedServices) return;

    context.log(logger, "Starting services");
    dockerOperations.startServices(context);
    hasStartedServices = true;
}
/**
 * Runs the optional node resume program once per container start, and on first resume
 * also writes the metrics config and schedules a daily filebeat restart.
 */
void resumeNodeIfNeeded(NodeAgentContext context) {
    if (!hasResumedNode) {
        if (!currentFilebeatRestarter.isPresent()) {
            storageMaintainer.writeMetricsConfig(context);
            // Restart filebeat daily; the task handle lets stopFilebeatSchedulerIfNeeded() cancel it.
            currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                    () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
        }
        context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
        dockerOperations.resumeNode(context);
        // Only mark resumed after resumeNode succeeded, so a failure retries next tick.
        hasResumedNode = true;
    }
}
/**
 * Computes which node attributes (restart/reboot generation, docker image) have drifted
 * from what the node repo currently records, and publishes only the changed ones.
 */
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
    final NodeAttributes currentNodeAttributes = new NodeAttributes();
    final NodeAttributes newNodeAttributes = new NodeAttributes();
    // Restart generation is only reported while a restart is actually wanted.
    if (context.node().getWantedRestartGeneration().isPresent() &&
            !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
        currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
        newNodeAttributes.withRestartGeneration(currentRestartGeneration);
    }
    if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
        currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
        newNodeAttributes.withRebootGeneration(currentRebootGeneration);
    }
    // Only report the wanted image as current when the container state is UNKNOWN (i.e. presumed running).
    Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
    if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
        currentNodeAttributes.withDockerImage(context.node().getCurrentDockerImage().orElse(new DockerImage("")));
        newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
    }
    publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
/** Writes the new attributes to the node repo, skipping the call when nothing changed. */
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    if (currentAttributes.equals(newAttributes)) return;

    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
            currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
/**
 * Creates and starts the container, resetting per-container bookkeeping:
 * fresh CPU metrics baseline, services considered started, node not yet resumed.
 */
private void startContainer(NodeAgentContext context) {
    ContainerData containerData = createContainerData(context);
    dockerOperations.createContainer(context, containerData);
    dockerOperations.startContainer(context);
    // New container means previous CPU counters are meaningless.
    lastCpuMetric = new CpuUsageReporter();
    hasStartedServices = true; // Services are started at container creation
    hasResumedNode = false;
    context.log(logger, "Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the container when warranted; if it survives, restarts its services when the
 * wanted restart generation has been bumped. Returns the (possibly removed) container.
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(
        NodeAgentContext context, Optional<Container> existingContainer) {
    return existingContainer
            .flatMap(container -> removeContainerIfNeeded(context, container))
            .map(container -> {
                shouldRestartServices(context.node()).ifPresent(restartReason -> {
                    context.log(logger, "Will restart services: " + restartReason);
                    restartServices(context, container);
                    // Record that we have satisfied the wanted restart generation.
                    currentRestartGeneration = context.node().getWantedRestartGeneration();
                });
                return container;
            });
}
/**
 * Returns a reason to restart services when the node repo's wanted restart generation
 * is ahead of the one we have acted on; empty otherwise.
 * NOTE(review): assumes currentRestartGeneration is present whenever the wanted generation
 * is — doConverge syncs presence on spec change, but confirm .get() cannot throw here.
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
    if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
    }
    return Optional.empty();
}
/**
 * Restarts vespa inside the container, first obtaining Orchestrator permission.
 * Only applies to a running container on an active node.
 */
private void restartServices(NodeAgentContext context, Container existingContainer) {
    if (!existingContainer.state.isRunning()) return;
    if (context.node().getState() != Node.State.active) return;

    context.log(logger, "Restarting services");
    // Since we are restarting the services we need to suspend the node.
    orchestratorSuspendNode(context);
    dockerOperations.restartVespa(context);
}
/**
 * Stops services inside the container, clearing the started/resumed flags so a later
 * converge starts them again. No-op when the container is known to be absent; a
 * container that vanished mid-call is recorded as absent.
 */
@Override
public void stopServices() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Stopping services");
    if (containerState == ABSENT) return;
    try {
        hasStartedServices = hasResumedNode = false;
        dockerOperations.stopServices(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    }
}
/**
 * Suspends services on the node (best effort). No-op when the container is known absent;
 * a vanished container is recorded as absent, any other failure is logged and swallowed.
 */
@Override
public void suspend() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Suspending services on node");
    if (containerState == ABSENT) return;
    try {
        // Suspending invalidates any earlier resume; force a re-resume afterwards.
        hasResumedNode = false;
        dockerOperations.suspendNode(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    } catch (RuntimeException e) {
        // It's bad to continue as-if nothing happened, but on the other hand if
        // we fail to suspend, the node must be considered broken anyway.
        context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
    }
}
/**
 * Decides whether the existing container must be removed, returning the human-readable
 * reason if so. Checks are ordered; the first match supplies the reason.
 */
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
    Node.State state = node.getState();
    if (state == Node.State.dirty || state == Node.State.provisioned) {
        return Optional.of("Node in state " + state + ", container should no longer be running");
    }

    Optional<DockerImage> wantedImage = node.getWantedDockerImage();
    if (wantedImage.isPresent() && !wantedImage.get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer.image.asString() + " -> " + wantedImage.get().asString());
    }

    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }

    if (currentRebootGeneration < node.getWantedRebootGeneration()) {
        return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, node.getWantedRebootGeneration()));
    }

    // STARTING here means a previous start attempt never completed.
    if (containerState == STARTING) return Optional.of("Container failed to start");

    return Optional.empty();
}
/**
 * Removes the container if {@link #shouldRemoveContainer} supplies a reason, otherwise
 * returns it unchanged. Returns empty after removal so callers know no container exists.
 * Order matters: Orchestrator suspend first, then services, then container removal.
 */
private Optional<Container> removeContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(context.node(), existingContainer);
    if (removeReason.isPresent()) {
        context.log(logger, "Will remove container: " + removeReason.get());
        if (existingContainer.state.isRunning()) {
            // Only active nodes need the Orchestrator's permission before suspension.
            if (context.node().getState() == Node.State.active) {
                orchestratorSuspendNode(context);
            }
            try {
                // Dirty nodes are being torn down anyway, so skip the suspend step.
                if (context.node().getState() != Node.State.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                // Best effort: removal proceeds even if services could not be stopped cleanly.
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }
        stopFilebeatSchedulerIfNeeded();
        // Salvage any core dumps before the container goes away.
        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        // Removing the container satisfies a pending reboot request.
        currentRebootGeneration = context.node().getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Starts an async pull of the wanted docker image when it differs from the current one,
 * tracking the in-flight image in {@code imageBeingDownloaded}.
 */
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    // Current and wanted image agree (or both are absent): nothing to pull.
    if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

    DockerImage wantedImage = node.getWantedDockerImage().get();
    boolean pullStarted = dockerOperations.pullImageAsyncIfNeeded(wantedImage);
    if (pullStarted) {
        imageBeingDownloaded = wantedImage;
    } else if (imageBeingDownloaded != null) {
        // Pull finished (or was unnecessary): clear the in-flight marker.
        imageBeingDownloaded = null;
    }
}
/**
 * Runs one convergence pass, translating failures into logging/counters so the
 * calling tick loop never dies on an exception. Catch order is significant:
 * specific, expected exceptions are handled before the catch-all.
 */
public void converge(NodeAgentContext context) {
    try {
        doConverge(context);
    } catch (OrchestratorException | ConvergenceException e) {
        // Expected transient conditions: log message only, no stack trace, not counted as unhandled.
        context.log(logger, e.getMessage());
    } catch (ContainerNotFoundException e) {
        // Container vanished underneath us; record that it is gone so the next tick recreates it.
        containerState = ABSENT;
        context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
    } catch (DockerException e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
    } catch (Throwable e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
    }
}
/**
 * Single convergence step: syncs locally tracked generations with the node repo spec,
 * then acts on the node's state (remove/create/update container, start services,
 * report attributes, resume in Orchestrator). May throw; {@link #converge} handles that.
 */
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);
        // Generations only move forward locally, never backwards.
        if (currentRebootGeneration < node.getCurrentRebootGeneration())
            currentRebootGeneration = node.getCurrentRebootGeneration();
        if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
                currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
            currentRestartGeneration = node.getCurrentRestartGeneration();
        // Refresh metrics config on spec change while the container is running.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context);
        }
        lastNode = node;
    }
    switch (node.getState()) {
        // Non-active states where a container should not run: tear down and report.
        case ready:
        case reserved:
        case parked:
        case failed:
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, container);
            // Trigger cleanup when disk utilization reaches 80% of the allocation.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
            scheduleDownLoadIfNeeded(node);
            // Defer the rest of convergence until the wanted image has been pulled.
            if (isDownloadingImage()) {
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(context, container);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            if (! container.isPresent()) {
                // STARTING marks the window where a failed start leaves an unknown container.
                containerState = STARTING;
                startContainer(context);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            } else {
                updateContainerIfNeeded(context, container.get());
            }
            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));
            // Report attributes before resuming so the node repo sees the converged state first.
            updateNodeRepoWithCurrentAttributes(context);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case provisioned:
            // Provisioned nodes are immediately moved to dirty for cleanup on the next tick.
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
/** Logs one INFO line describing spec fields that changed between the two node specs, if any. */
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    StringBuilder diff = new StringBuilder();
    appendIfDifferent(diff, "state", lastNode, node, NodeSpec::getState);
    if (diff.length() == 0) return;  // nothing changed, stay silent

    context.log(logger, LogLevel.INFO, "Changes to node: " + diff.toString());
}
/** Renders {@code null} as a human-readable placeholder, otherwise defers to {@code toString()}. */
private static <T> String fieldDescription(T value) {
    return Objects.toString(value, "[absent]");
}
/**
 * Appends {@code "name old -> new"} to the builder when the field extracted by
 * {@code getter} differs between the two specs; a comma separates multiple entries.
 * A null {@code oldNode} is treated as an absent old value.
 */
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T previous = oldNode == null ? null : getter.apply(oldNode);
    T current = getter.apply(newNode);
    if (Objects.equals(previous, current)) return;

    if (builder.length() > 0) {
        builder.append(", ");
    }
    builder.append(name)
           .append(" ")
           .append(fieldDescription(previous))
           .append(" -> ")
           .append(fieldDescription(current));
}
/** Cancels the scheduled filebeat restarter task, if one is active, and forgets its handle. */
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
/**
 * Samples docker container stats and publishes CPU/memory/disk/network metrics,
 * dimensioned by host/role/state, into the container via secret-agent.
 * No-op unless the container is in the UNKNOWN (i.e. presumed running) state.
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    if (containerState != UNKNOWN) return;
    final NodeAgentContext context = contextSupplier.currentContext();
    final NodeSpec node = context.node();
    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();
    ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // Raw docker stats arrive as loosely typed maps; extract the counters we need.
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
    // CPU ratios are computed from the delta since the previous sample.
    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
    // Scale system-wide CPU ratio up to the share of cores allocated to this node.
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
    // "used" excludes the page cache; "total used" includes it.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("mem_total.used", memoryTotalBytesUsage)
            .withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());
    // One metric set per network interface, with the interface name as an extra dimension.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });
    pushMetricsToContainer(context, metrics);
}
/**
 * Serializes the given metrics as secret-agent reports and injects them into the running
 * container by invoking setExtraMetrics over vespa-rpc (tcp/localhost:19091).
 * Serialization failures and exec timeouts are logged at WARNING and otherwise ignored.
 */
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
    try {
        StringBuilder report = new StringBuilder();
        for (DimensionMetrics metric : metrics) {
            report.append(metric.toSecretAgentReport());
        }
        String wrappedMetrics = "s:" + report;
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
/**
 * Returns the container for this context, or empty if there is none.
 * A known-absent container is cached in containerState so we can skip asking the docker daemon.
 */
private Optional<Container> getContainer(NodeAgentContext context) {
    if (containerState == ABSENT) {
        return Optional.empty();
    }
    Optional<Container> existing = dockerOperations.getContainer(context);
    if (!existing.isPresent()) {
        containerState = ABSENT;
    }
    return existing;
}
/** Returns whether an image pull started by this agent is still in flight. */
@Override
public boolean isDownloadingImage() {
    boolean pullInProgress = (imageBeingDownloaded != null);
    return pullInProgress;
}
/** Returns the number of unhandled exceptions seen since the previous call, then resets the counter. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int unhandled = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandled;
}
/**
 * Tracks cumulative CPU counters between consecutive metric ticks, and exposes the CPU
 * usage ratios for the interval between the last two calls to updateCpuDeltas().
 */
class CpuUsageReporter {
    // Last cumulative counters seen (0 until the first sample arrives).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Differences between the two most recent samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample ever: no previous system reading, so report a zero-length interval.
        if (this.totalSystemUsage == 0) {
            deltaSystemUsage = 0;
        } else {
            deltaSystemUsage = totalSystemUsage - this.totalSystemUsage;
        }
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called: container CPU time
     * divided by total system CPU time. NaN until two samples have been recorded.
     */
    double getCpuUsageRatio() {
        if (deltaSystemUsage == 0) return Double.NaN;
        return (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Like getCpuUsageRatio(), but counting only the container's kernel-mode CPU time. */
    double getCpuKernelUsageRatio() {
        if (deltaSystemUsage == 0) return Double.NaN;
        return (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/**
 * Asks the Orchestrator for permission to suspend this node; the call throws if permission
 * is denied. No-op for nodes that are not active: callers can reach this for nodes in other
 * states (e.g. when removing the container of a node that is no longer active), where an
 * Orchestrator suspend request is not wanted.
 */
private void orchestratorSuspendNode(NodeAgentContext context) {
    if (context.node().getState() != Node.State.active) return;

    context.log(logger, "Ask Orchestrator for permission to suspend node");
    orchestrator.suspend(context.hostname().value());
}
/**
 * Hook for subclasses that want to seed files into the container before it is started.
 * The default implementation rejects every write.
 */
protected ContainerData createContainerData(NodeAgentContext context) {
    return (path, content) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

// Set exactly once, by stop(); tells the tick loop to exit and guards against double-stop.
private final AtomicBoolean terminated = new AtomicBoolean(false);

// Whether the optional node "resume" program has been run for the current container.
private boolean hasResumedNode = false;
// Whether services inside the container are believed to be started.
private boolean hasStartedServices = true;

private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private final DoubleFlag containerCpuCap;

// Incremented for unexpected exceptions in converge(); drained by getAndResetNumberOfUnhandledExceptions().
private int numberOfUnhandledException = 0;
// Image currently being pulled asynchronously, or null when no pull is in flight.
private DockerImage imageBeingDownloaded = null;

// Reboot/restart generations this agent has converged on, compared against the node repo's wanted values.
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();

// Runs converge() for each context produced by contextSupplier until terminated.
private final Thread loopThread;
// Daily filebeat restarts are scheduled on this single-threaded executor.
private final ScheduledExecutorService filebeatRestarter =
        Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
// Handle to the scheduled filebeat-restart task, present while the node is resumed.
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
    ABSENT,
    STARTING,
    UNKNOWN
}

private ContainerState containerState = UNKNOWN;

// Last NodeSpec this agent converged on; used to detect and log changes.
private NodeSpec lastNode = null;
// CPU counters for the current container; replaced whenever a new container is started.
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Wires up collaborators and prepares (but does not start) the converge loop thread.
 * Call start() to begin ticking.
 */
public NodeAgentImpl(
        final NodeAgentContextSupplier contextSupplier,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final StorageMaintainer storageMaintainer,
        final FlagSource flagSource,
        final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
        final Optional<AclMaintainer> aclMaintainer,
        final Optional<HealthChecker> healthChecker) {
    this.contextSupplier = contextSupplier;
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
    this.aclMaintainer = aclMaintainer;
    this.healthChecker = healthChecker;

    // Bind the CPU-cap flag to this node's hostname so flag lookups resolve per host.
    this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
            .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());

    // The tick loop: block for the next context, then converge on it. InterruptedException is
    // expected during shutdown (contextSupplier.interrupt()) and only breaks the current wait;
    // the terminated flag is what ends the loop.
    this.loopThread = new Thread(() -> {
        while (!terminated.get()) {
            try {
                NodeAgentContext context = contextSupplier.nextContext();
                converge(context);
            } catch (InterruptedException ignored) { }
        }
    });
    this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());

    // Restarts a named service inside the container; failures are logged, never thrown
    // (this runs on the filebeatRestarter scheduler).
    this.serviceRestarter = service -> {
        NodeAgentContext context = contextSupplier.currentContext();
        try {
            ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                    context, "service", service, "restart");
            if (!processResult.isSuccess()) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
            }
        } catch (Exception e) {
            context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
        }
    };
}
/** Starts the converge loop thread. */
@Override
public void start() {
    loopThread.start();
}

/**
 * Stops this agent: flips the terminated flag (exactly once), shuts down the filebeat
 * scheduler, interrupts the context supplier so the loop thread unblocks from nextContext(),
 * then waits until both the loop thread and the scheduler have fully terminated.
 *
 * @throws RuntimeException if called more than once
 */
@Override
public void stop() {
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    filebeatRestarter.shutdown();
    contextSupplier.interrupt();
    // Re-try the joins until both have actually finished; an InterruptedException here only
    // aborts the current wait, not the shutdown.
    do {
        try {
            loopThread.join();
            filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException ignored) { }
    } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
    contextSupplier.currentContext().log(logger, "Stopped");
}
/** Starts services inside the container unless they are already believed started. */
void startServicesIfNeeded(NodeAgentContext context) {
    if (hasStartedServices) return;

    context.log(logger, "Starting services");
    dockerOperations.startServices(context);
    hasStartedServices = true;
}
/**
 * Runs the optional node "resume" program unless the node is already resumed.
 * On first resume this also writes the metrics config and schedules the daily
 * filebeat restart task.
 */
void resumeNodeIfNeeded(NodeAgentContext context) {
    if (hasResumedNode) return;

    if (!currentFilebeatRestarter.isPresent()) {
        storageMaintainer.writeMetricsConfig(context);
        currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
    }
    context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
    dockerOperations.resumeNode(context);
    hasResumedNode = true;
}
/**
 * Computes the diff between what the node repo currently records for this node and what this
 * agent has converged on (restart/reboot generations and docker image), and publishes the
 * new values if anything differs. Only changed attributes are added to the diff.
 */
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
    final NodeAttributes currentNodeAttributes = new NodeAttributes();
    final NodeAttributes newNodeAttributes = new NodeAttributes();

    // Restart generation: only relevant while the node repo has a wanted restart generation.
    if (context.node().getWantedRestartGeneration().isPresent() &&
            !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
        currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
        newNodeAttributes.withRestartGeneration(currentRestartGeneration);
    }

    if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
        currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
        newNodeAttributes.withRebootGeneration(currentRebootGeneration);
    }

    // Report the wanted image as current only when the container state is UNKNOWN (i.e. the
    // container may be running); an empty image is encoded as "".
    Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
    if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
        currentNodeAttributes.withDockerImage(context.node().getCurrentDockerImage().orElse(new DockerImage("")));
        newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
    }

    publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
/** Pushes the new attributes to the node repo, but only if they differ from the current ones. */
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    if (currentAttributes.equals(newAttributes)) return;

    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
            currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
/**
 * Creates and starts the container for this node, then resets per-container state:
 * fresh CPU counters, services assumed started, node not yet resumed.
 */
private void startContainer(NodeAgentContext context) {
    ContainerData data = createContainerData(context);
    dockerOperations.createContainer(context, data);
    dockerOperations.startContainer(context);
    lastCpuMetric = new CpuUsageReporter();

    hasStartedServices = true;
    hasResumedNode = false;
    context.log(logger, "Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the existing container if it should be removed; otherwise restarts its services
 * if a restart is wanted. Returns the container if it still exists afterwards, else empty.
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(
        NodeAgentContext context, Optional<Container> existingContainer) {
    return existingContainer
            .flatMap(container -> removeContainerIfNeeded(context, container))
            // Only reached when the container was kept: restart services if the wanted
            // restart generation has moved past the one we have converged on.
            .map(container -> {
                shouldRestartServices(context.node()).ifPresent(restartReason -> {
                    context.log(logger, "Will restart services: " + restartReason);
                    restartServices(context, container);
                    currentRestartGeneration = context.node().getWantedRestartGeneration();
                });
                return container;
            });
}

/**
 * Returns the reason services should be restarted, or empty if no restart is needed.
 * NOTE(review): currentRestartGeneration.get() is called without an isPresent() check; this
 * appears to rely on doConverge() having synced currentRestartGeneration whenever the node
 * has a current restart generation — confirm that invariant holds on all paths.
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();

    if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
    }
    return Optional.empty();
}
/** Restarts Vespa inside the container, but only for a running container on an active node. */
private void restartServices(NodeAgentContext context, Container existingContainer) {
    if (!existingContainer.state.isRunning()) return;
    if (context.node().getState() != Node.State.active) return;

    context.log(logger, "Restarting services");
    // Since we are restarting the services we need to suspend the node.
    orchestratorSuspendNode(context);
    dockerOperations.restartVespa(context);
}
/** Stops services inside the container; a vanished container just flips containerState to ABSENT. */
@Override
public void stopServices() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Stopping services");
    if (containerState == ABSENT) return;

    try {
        hasStartedServices = false;
        hasResumedNode = false;
        dockerOperations.stopServices(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    }
}
/**
 * Suspends services on the node (runs the optional suspend program in the container).
 * A vanished container flips containerState to ABSENT; any other runtime failure is
 * logged at WARNING and swallowed — suspend is best-effort.
 */
@Override
public void suspend() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Suspending services on node");
    if (containerState == ABSENT) return;
    try {
        hasResumedNode = false;
        dockerOperations.suspendNode(context);
    } catch (ContainerNotFoundException e) {
        // More specific than RuntimeException below; order of these catches matters.
        containerState = ABSENT;
    } catch (RuntimeException e) {
        // It's bad to continue as-if nothing happened, but on the other hand if
        // suspension failed, the node is most likely broken anyway.
        context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
    }
}
/**
 * Returns the reason the existing container should be removed, or empty if it should be kept.
 * The checks are ordered; the first matching reason wins.
 */
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
    final Node.State nodeState = node.getState();
    // Nodes being deprovisioned or reprovisioned must not keep a running container.
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    // Image change requires recreating the container.
    if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }

    // A bumped reboot generation is converged on by recreating the container.
    if (currentRebootGeneration < node.getWantedRebootGeneration()) {
        return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, node.getWantedRebootGeneration()));
    }

    // STARTING here means a previous start attempt never completed.
    if (containerState == STARTING) return Optional.of("Container failed to start");
    return Optional.empty();
}
/**
 * Removes the existing container if shouldRemoveContainer() gives a reason, performing the
 * full teardown sequence: orchestrator suspend, service suspend/stop (best effort), stop the
 * filebeat task, salvage core dumps, remove the container, then record the converged reboot
 * generation and mark the container ABSENT. Returns the container if it was kept, else empty.
 */
private Optional<Container> removeContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(context.node(), existingContainer);
    if (removeReason.isPresent()) {
        context.log(logger, "Will remove container: " + removeReason.get());

        if (existingContainer.state.isRunning()) {
            orchestratorSuspendNode(context);

            try {
                // Only suspend services if the node is not being removed anyway.
                if (context.node().getState() != Node.State.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                // Teardown must proceed even if graceful shutdown fails.
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }

        stopFilebeatSchedulerIfNeeded();
        // Collect core dumps before the container (and its file system view) goes away.
        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        currentRebootGeneration = context.node().getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Starts an asynchronous pull of the node's wanted Docker image if it differs from the
 * current image, tracking the in-flight pull in imageBeingDownloaded (cleared once
 * pullImageAsyncIfNeeded reports no pull is needed).
 *
 * @param node the node spec whose wanted image should be made available locally
 */
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    Optional<DockerImage> wantedImage = node.getWantedDockerImage();
    // Nothing to do when no image is wanted, or the wanted image is already current.
    // The explicit absence check also guards the get() calls below, which previously
    // threw NoSuchElementException if a current image was set but no image was wanted.
    if (!wantedImage.isPresent() || node.getCurrentDockerImage().equals(wantedImage)) return;

    if (dockerOperations.pullImageAsyncIfNeeded(wantedImage.get())) {
        imageBeingDownloaded = wantedImage.get();
    } else {
        imageBeingDownloaded = null;
    }
}
/**
 * Runs one convergence tick, translating exceptions into the agent's error policy:
 * expected/transient conditions are logged and retried next tick; everything else bumps
 * the unhandled-exception counter. Never throws.
 */
public void converge(NodeAgentContext context) {
    try {
        doConverge(context);
    } catch (OrchestratorException | ConvergenceException e) {
        // Expected, self-healing conditions: log the message only, no stack trace.
        context.log(logger, e.getMessage());
    } catch (ContainerNotFoundException e) {
        // The container disappeared underneath us; reflect that and retry next tick.
        containerState = ABSENT;
        context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
    } catch (DockerException e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
    } catch (Throwable e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
    }
}
/**
 * The convergence state machine: drives the container and node repo towards the node's
 * wanted state. May throw; converge() maps exceptions to the agent's error policy.
 */
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);

    // On any node-spec change, sync the locally tracked generations and log the diff.
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);

        // Current reboot generation uninitialized or incremented from outside to current one
        if (currentRebootGeneration < node.getCurrentRebootGeneration())
            currentRebootGeneration = node.getCurrentRebootGeneration();

        // Either we have changed allocation status (restart gen. is only meaningful for
        // allocated nodes), or the current restart generation was bumped from outside.
        if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
                currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
            currentRestartGeneration = node.getCurrentRestartGeneration();

        // Every time the node spec changes, we should clear the metrics for this container as the dimensions
        // will change and we will be reporting duplicate metrics.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context);
        }

        lastNode = node;
    }

    switch (node.getState()) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run; just keep the node repo in sync.
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, container);

            // Clean up old files when disk utilization reaches 80% of the allocation.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

            scheduleDownLoadIfNeeded(node);
            if (isDownloadingImage()) {
                // Wait for the image pull to finish before (re)creating the container.
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(context, container);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            if (! container.isPresent()) {
                // STARTING marks a start attempt in flight; if startContainer() throws we
                // stay in STARTING, which shouldRemoveContainer() treats as a failed start.
                containerState = STARTING;
                startContainer(context);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            } else {
                updateContainerIfNeeded(context, container.get());
            }

            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));

            // Because it's more important to stop a bad release from rolling out in prod,
            // we put the resume call last. So if we fail after updating the node repo attributes
            // but before resume, the app may go through the tenant pipeline but will halt in prod.
            //
            // Note that this problem exists only because there are 2 different mechanisms
            // that should really be parts of a single mechanism:
            //  - The content of node repo is used to determine whether a new Vespa+application
            //    has been successfully rolled out.
            //  - Slobrok and internal orchestrator state is used to determine whether
            //    to allow upgrade (suspend).
            updateNodeRepoWithCurrentAttributes(context);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
/** Logs, at INFO, which interesting node-spec fields changed between the previous and current spec. */
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    StringBuilder changes = new StringBuilder();
    appendIfDifferent(changes, "state", lastNode, node, NodeSpec::getState);
    if (changes.length() > 0) {
        context.log(logger, LogLevel.INFO, "Changes to node: " + changes);
    }
}
/** Renders a field value for logging; null becomes "[absent]". */
private static <T> String fieldDescription(T value) {
    if (value == null) return "[absent]";
    return value.toString();
}
/**
 * Appends "name old -> new" to the builder when the field extracted by the getter differs
 * between the old node (may be null) and the new node; entries are comma-separated.
 */
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T before = (oldNode == null) ? null : getter.apply(oldNode);
    T after = getter.apply(newNode);
    if (Objects.equals(before, after)) return;

    if (builder.length() > 0) {
        builder.append(", ");
    }
    builder.append(name).append(" ").append(fieldDescription(before)).append(" -> ").append(fieldDescription(after));
}
/** Cancels the scheduled daily filebeat restart task, if one is active. */
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
if (containerState != UNKNOWN) return;
final NodeAgentContext context = contextSupplier.currentContext();
final NodeSpec node = context.node();
Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
.add("state", node.getState().toString());
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.getAllowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("mem_total.used", memoryTotalBytesUsage)
.withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(context, metrics);
}
/**
 * Concatenates the metrics as secret-agent reports and hands them to the container's
 * metrics agent via vespa-rpc-invoke setExtraMetrics. Exec timeouts and serialization
 * failures are logged at WARNING and otherwise ignored.
 */
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
    try {
        StringBuilder reports = new StringBuilder();
        for (DimensionMetrics metric : metrics) {
            reports.append(metric.toSecretAgentReport());
        }
        String wrappedMetrics = "s:" + reports;

        // Push metrics to the metrics proxy in each container.
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
/**
 * Returns the container for this context, or empty if there is none. A known-absent
 * container is cached in containerState to avoid asking the docker daemon again.
 */
private Optional<Container> getContainer(NodeAgentContext context) {
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> container = dockerOperations.getContainer(context);
    if (! container.isPresent()) containerState = ABSENT;
    return container;
}

/** Returns whether an image pull started by this agent is still in flight. */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}

/** Returns the number of unhandled exceptions since the previous call, and resets the counter. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int temp = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return temp;
}
/**
 * Tracks cumulative CPU counters between consecutive metric ticks and exposes usage
 * ratios for the interval between the last two calls to updateCpuDeltas().
 */
class CpuUsageReporter {
    // Last cumulative counters seen (0 until the first sample).
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Differences between the two most recent samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample: no previous system reading, so report a zero-length interval.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     * NaN until two samples have been recorded.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Like getCpuUsageRatio(), but counting only the container's kernel-mode CPU time. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/**
 * Asks the Orchestrator for permission to suspend this node (throws if denied).
 * No-op for nodes that are not active.
 */
private void orchestratorSuspendNode(NodeAgentContext context) {
    if (context.node().getState() != Node.State.active) return;

    context.log(logger, "Ask Orchestrator for permission to suspend node");
    orchestrator.suspend(context.hostname().value());
}

/**
 * Hook for subclasses that want to seed files into the container before it is started.
 * The default implementation rejects every write.
 */
protected ContainerData createContainerData(NodeAgentContext context) {
    return (pathInContainer, data) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} |
Suppose `app` is pinned to 8. Calling this with `targetMajorVersion = 7` keeps `app`: `app` _is_ pinned to another version than the given one. (Current doc.) `app` _isn't_ pinned to _an earlier_ version than the given one. (Previous doc.) | public ApplicationList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) {
return listOf(list.stream().filter(a -> a.majorVersion().orElse(a.deploymentSpec().majorVersion().orElse(defaultMajorVersion))
>= targetMajorVersion));
} | >= targetMajorVersion)); | public ApplicationList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) {
return listOf(list.stream().filter(a -> a.majorVersion().orElse(a.deploymentSpec().majorVersion().orElse(defaultMajorVersion))
>= targetMajorVersion));
} | class ApplicationList {
// The applications in this list; immutable snapshot taken at construction time.
private final ImmutableList<Application> list;

private ApplicationList(Iterable<Application> applications) {
    this.list = ImmutableList.copyOf(applications);
}

/** Creates an ApplicationList containing a copy of the given applications. */
public static ApplicationList from(Iterable<Application> applications) {
    return new ApplicationList(applications);
}

/** Creates an ApplicationList by resolving each id via the given controller (require — presumably throws for unknown ids; see ApplicationController). */
public static ApplicationList from(Collection<ApplicationId> ids, ApplicationController applications) {
    return listOf(ids.stream().map(applications::require));
}

/** Returns the applications in this as an immutable list */
public List<Application> asList() { return list; }

/** Returns the ids of the applications in this as an immutable list */
public List<ApplicationId> idList() { return ImmutableList.copyOf(list.stream().map(Application::id)::iterator); }

/** Returns whether this list contains no applications. */
public boolean isEmpty() { return list.isEmpty(); }

/** Returns the number of applications in this list. */
public int size() { return list.size(); }
/** Returns the subset of applications with an ongoing platform upgrade (to any version), ignoring block windows. */
public ApplicationList upgrading() {
    return listOf(list.stream()
                      .filter(a -> a.change().platform().isPresent()));
}
/** Returns the subset of applications which are currently upgrading to the given version */
public ApplicationList upgradingTo(Version version) {
    return listOf(list.stream()
                      .filter(a -> isUpgradingTo(version, a)));
}
/** Returns the subset of applications which are not pinned to a certain Vespa version. */
public ApplicationList notPinned() {
    return listOf(list.stream().filter(application -> ! application.change().isPinned()));
}

/** Returns the subset of applications which are currently not upgrading to the given version */
public ApplicationList notUpgradingTo(Version version) {
    return listOf(list.stream().filter(application -> ! isUpgradingTo(version, application)));
}
/**
 * Returns the subset of applications which are currently not upgrading to the given version,
 * or all applications when no version is given.
 */
public ApplicationList notUpgradingTo(Optional<Version> version) {
    return version.map(this::notUpgradingTo).orElse(this);
}
/** Returns the subset of applications which have changes left to deploy: either blocked, or currently deploying. */
public ApplicationList withChanges() {
    return listOf(list.stream().filter(application -> application.change().isPresent() || application.outstandingChange().isPresent()));
}

/** Returns the subset of applications which are currently not deploying a change */
public ApplicationList notDeploying() {
    return listOf(list.stream().filter(application -> ! application.change().isPresent()));
}

/** Returns the subset of applications which currently do not have any failing jobs */
public ApplicationList notFailing() {
    return listOf(list.stream().filter(application -> ! application.deploymentJobs().hasFailures()));
}

/** Returns the subset of applications which currently have failing jobs */
public ApplicationList failing() {
    return listOf(list.stream().filter(application -> application.deploymentJobs().hasFailures()));
}
/** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */
public ApplicationList failingUpgradeToVersionSince(Version version, Instant threshold) {
    return listOf(list.stream().filter(application -> failingUpgradeToVersionSince(application, version, threshold)));
}

/** Returns the subset of applications which have been failing an application change since the given instant */
public ApplicationList failingApplicationChangeSince(Instant threshold) {
    return listOf(list.stream().filter(application -> failingApplicationChangeSince(application, threshold)));
}

/** Returns the subset of applications which currently do not have any failing jobs on the given version */
public ApplicationList notFailingOn(Version version) {
    return listOf(list.stream().filter(application -> ! failingOn(version, application)));
}
/** Returns the subset of applications which have at least one production deployment */
public ApplicationList hasDeployment() {
return listOf(list.stream().filter(a -> !a.productionDeployments().isEmpty()));
}
/** Returns the subset of applications which started failing on the given version */
public ApplicationList startedFailingOn(Version version) {
return listOf(list.stream().filter(application -> ! JobList.from(application).firstFailing().on(version).isEmpty()));
}
/** Returns the subset of applications which has the given upgrade policy */
public ApplicationList with(UpgradePolicy policy) {
return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() == policy));
}
/** Returns the subset of applications which does not have the given upgrade policy */
public ApplicationList without(UpgradePolicy policy) {
return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() != policy));
}
/** Returns the subset of applications which have at least one deployment on a lower version than the given one */
public ApplicationList onLowerVersionThan(Version version) {
return listOf(list.stream()
.filter(a -> a.productionDeployments().values().stream()
.anyMatch(d -> d.version().isBefore(version))));
}
/** Returns the subset of applications which have a project ID */
public ApplicationList withProjectId() {
return listOf(list.stream().filter(a -> a.deploymentJobs().projectId().isPresent()));
}
/** Returns the subset of applications which have at least one production deployment */
public ApplicationList hasProductionDeployment() {
return listOf(list.stream().filter(a -> ! a.productionDeployments().isEmpty()));
}
/** Returns the subset of applications that are allowed to upgrade at the given time */
public ApplicationList canUpgradeAt(Instant instant) {
return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant)));
}
/**
* Returns the subset of applications that hasn't pinned to another major version than the given one.
*
* @param targetMajorVersion the target major version which applications returned allows upgrading to
* @param defaultMajorVersion the default major version to assume for applications not specifying one
*/
/** Returns the first n application in this (or all, if there are less than n). */
public ApplicationList first(int n) {
if (list.size() < n) return this;
return new ApplicationList(list.subList(0, n));
}
/**
* Returns this list sorted by increasing deployed version.
* If multiple versions are deployed the oldest is used.
* Applications without any deployments are ordered first.
*/
public ApplicationList byIncreasingDeployedVersion() {
return listOf(list.stream().sorted(Comparator.comparing(application -> application.oldestDeployedPlatform().orElse(Version.emptyVersion))));
}
private static boolean isUpgradingTo(Version version, Application application) {
return application.change().platform().equals(Optional.of(version));
}
private static boolean failingOn(Version version, Application application) {
return ! JobList.from(application)
.failing()
.lastCompleted().on(version)
.isEmpty();
}
private static boolean failingUpgradeToVersionSince(Application application, Version version, Instant threshold) {
return ! JobList.from(application)
.not().failingApplicationChange()
.firstFailing().before(threshold)
.lastCompleted().on(version)
.isEmpty();
}
private static boolean failingApplicationChangeSince(Application application, Instant threshold) {
return ! JobList.from(application)
.failingApplicationChange()
.firstFailing().before(threshold)
.isEmpty();
}
/** Convenience converter from a stream to an ApplicationList */
private static ApplicationList listOf(Stream<Application> applications) {
return from(applications::iterator);
}
} | class ApplicationList {
private final ImmutableList<Application> list;
private ApplicationList(Iterable<Application> applications) {
this.list = ImmutableList.copyOf(applications);
}
public static ApplicationList from(Iterable<Application> applications) {
return new ApplicationList(applications);
}
public static ApplicationList from(Collection<ApplicationId> ids, ApplicationController applications) {
return listOf(ids.stream().map(applications::require));
}
/** Returns the applications in this as an immutable list */
public List<Application> asList() { return list; }
/** Returns the ids of the applications in this as an immutable list */
public List<ApplicationId> idList() { return ImmutableList.copyOf(list.stream().map(Application::id)::iterator); }
public boolean isEmpty() { return list.isEmpty(); }
public int size() { return list.size(); }
/** Returns the subset of applications which are upgrading (to any version), not considering block windows. */
public ApplicationList upgrading() {
return listOf(list.stream().filter(application -> application.change().platform().isPresent()));
}
/** Returns the subset of applications which are currently upgrading to the given version */
public ApplicationList upgradingTo(Version version) {
return listOf(list.stream().filter(application -> isUpgradingTo(version, application)));
}
/** Returns the subset of applications which are not pinned to a certain Vespa version. */
public ApplicationList notPinned() {
return listOf(list.stream().filter(application -> ! application.change().isPinned()));
}
/** Returns the subset of applications which are currently not upgrading to the given version */
public ApplicationList notUpgradingTo(Version version) {
return listOf(list.stream().filter(application -> ! isUpgradingTo(version, application)));
}
/**
* Returns the subset of applications which are currently not upgrading to the given version,
* or returns all if no version is specified
*/
public ApplicationList notUpgradingTo(Optional<Version> version) {
if ( ! version.isPresent()) return this;
return notUpgradingTo(version.get());
}
/** Returns the subset of applications which have changes left to deploy; blocked, or deploying */
public ApplicationList withChanges() {
return listOf(list.stream().filter(application -> application.change().isPresent() || application.outstandingChange().isPresent()));
}
/** Returns the subset of applications which are currently not deploying a change */
public ApplicationList notDeploying() {
return listOf(list.stream().filter(application -> ! application.change().isPresent()));
}
/** Returns the subset of applications which currently does not have any failing jobs */
public ApplicationList notFailing() {
return listOf(list.stream().filter(application -> ! application.deploymentJobs().hasFailures()));
}
/** Returns the subset of applications which currently have failing jobs */
public ApplicationList failing() {
return listOf(list.stream().filter(application -> application.deploymentJobs().hasFailures()));
}
/** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */
public ApplicationList failingUpgradeToVersionSince(Version version, Instant threshold) {
return listOf(list.stream().filter(application -> failingUpgradeToVersionSince(application, version, threshold)));
}
/** Returns the subset of applications which have been failing an application change since the given instant */
public ApplicationList failingApplicationChangeSince(Instant threshold) {
return listOf(list.stream().filter(application -> failingApplicationChangeSince(application, threshold)));
}
/** Returns the subset of applications which currently does not have any failing jobs on the given version */
public ApplicationList notFailingOn(Version version) {
return listOf(list.stream().filter(application -> ! failingOn(version, application)));
}
/** Returns the subset of applications which have at least one production deployment */
public ApplicationList hasDeployment() {
return listOf(list.stream().filter(a -> !a.productionDeployments().isEmpty()));
}
/** Returns the subset of applications which started failing on the given version */
public ApplicationList startedFailingOn(Version version) {
return listOf(list.stream().filter(application -> ! JobList.from(application).firstFailing().on(version).isEmpty()));
}
/** Returns the subset of applications which has the given upgrade policy */
public ApplicationList with(UpgradePolicy policy) {
return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() == policy));
}
/** Returns the subset of applications which does not have the given upgrade policy */
public ApplicationList without(UpgradePolicy policy) {
return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() != policy));
}
/** Returns the subset of applications which have at least one deployment on a lower version than the given one */
public ApplicationList onLowerVersionThan(Version version) {
return listOf(list.stream()
.filter(a -> a.productionDeployments().values().stream()
.anyMatch(d -> d.version().isBefore(version))));
}
/** Returns the subset of applications which have a project ID */
public ApplicationList withProjectId() {
return listOf(list.stream().filter(a -> a.deploymentJobs().projectId().isPresent()));
}
/** Returns the subset of applications which have at least one production deployment */
public ApplicationList hasProductionDeployment() {
return listOf(list.stream().filter(a -> ! a.productionDeployments().isEmpty()));
}
/** Returns the subset of applications that are allowed to upgrade at the given time */
public ApplicationList canUpgradeAt(Instant instant) {
return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant)));
}
/**
* Returns the subset of applications that hasn't pinned to another major version than the given one.
*
* @param targetMajorVersion the target major version which applications returned allows upgrading to
* @param defaultMajorVersion the default major version to assume for applications not specifying one
*/
/** Returns the first n application in this (or all, if there are less than n). */
public ApplicationList first(int n) {
if (list.size() < n) return this;
return new ApplicationList(list.subList(0, n));
}
/**
* Returns this list sorted by increasing deployed version.
* If multiple versions are deployed the oldest is used.
* Applications without any deployments are ordered first.
*/
public ApplicationList byIncreasingDeployedVersion() {
return listOf(list.stream().sorted(Comparator.comparing(application -> application.oldestDeployedPlatform().orElse(Version.emptyVersion))));
}
private static boolean isUpgradingTo(Version version, Application application) {
return application.change().platform().equals(Optional.of(version));
}
private static boolean failingOn(Version version, Application application) {
return ! JobList.from(application)
.failing()
.lastCompleted().on(version)
.isEmpty();
}
private static boolean failingUpgradeToVersionSince(Application application, Version version, Instant threshold) {
return ! JobList.from(application)
.not().failingApplicationChange()
.firstFailing().before(threshold)
.lastCompleted().on(version)
.isEmpty();
}
private static boolean failingApplicationChangeSince(Application application, Instant threshold) {
return ! JobList.from(application)
.failingApplicationChange()
.firstFailing().before(threshold)
.isEmpty();
}
/** Convenience converter from a stream to an ApplicationList */
private static ApplicationList listOf(Stream<Application> applications) {
return from(applications::iterator);
}
} |
This method is always called when active? | private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
double cpuCap = context.node().getOwner()
.map(NodeSpec.Owner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
.value();
ContainerResources wantedContainerResources = ContainerResources.from(
cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
if (wantedContainerResources.equals(existingContainer.resources)) return;
context.log(logger, "Container should be running with different resource allocation, wanted: %s, current: %s",
wantedContainerResources, existingContainer.resources);
if (context.node().getState() == Node.State.active) {
orchestratorSuspendNode(context);
}
dockerOperations.updateContainer(context, wantedContainerResources);
} | if (context.node().getState() == Node.State.active) { | private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
double cpuCap = context.node().getOwner()
.map(NodeSpec.Owner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
.value();
ContainerResources wantedContainerResources = ContainerResources.from(
cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
if (wantedContainerResources.equals(existingContainer.resources)) return;
context.log(logger, "Container should be running with different resource allocation, wanted: %s, current: %s",
wantedContainerResources, existingContainer.resources);
orchestratorSuspendNode(context);
dockerOperations.updateContainer(context, wantedContainerResources);
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private final DoubleFlag containerCpuCap;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final NodeAgentContextSupplier contextSupplier,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final FlagSource flagSource,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.contextSupplier = contextSupplier;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
.with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
this.loopThread = new Thread(() -> {
while (!terminated.get()) {
try {
NodeAgentContext context = contextSupplier.nextContext();
converge(context);
} catch (InterruptedException ignored) { }
}
});
this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
this.serviceRestarter = service -> {
NodeAgentContext context = contextSupplier.currentContext();
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
@Override
public void start() {
loopThread.start();
}
@Override
public void stop() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
filebeatRestarter.shutdown();
contextSupplier.interrupt();
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException ignored) { }
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
contextSupplier.currentContext().log(logger, "Stopped");
}
void startServicesIfNeeded(NodeAgentContext context) {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
void resumeNodeIfNeeded(NodeAgentContext context) {
if (!hasResumedNode) {
if (!currentFilebeatRestarter.isPresent()) {
storageMaintainer.writeMetricsConfig(context);
currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
}
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
if (context.node().getWantedRestartGeneration().isPresent() &&
!Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
currentNodeAttributes.withDockerImage(context.node().getCurrentDockerImage().orElse(new DockerImage("")));
newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
}
publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
if (!currentAttributes.equals(newAttributes)) {
context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
currentAttributes, newAttributes);
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
}
private void startContainer(NodeAgentContext context) {
ContainerData containerData = createContainerData(context);
dockerOperations.createContainer(context, containerData);
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(
NodeAgentContext context, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(context, container))
.map(container -> {
shouldRestartServices(context.node()).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(context, container);
currentRestartGeneration = context.node().getWantedRestartGeneration();
});
return container;
});
}
private Optional<String> shouldRestartServices(NodeSpec node) {
if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
}
return Optional.empty();
}
private void restartServices(NodeAgentContext context, Container existingContainer) {
if (existingContainer.state.isRunning() && context.node().getState() == Node.State.active) {
context.log(logger, "Restarting services");
orchestratorSuspendNode(context);
dockerOperations.restartVespa(context);
}
}
@Override
public void stopServices() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
@Override
public void suspend() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final Node.State nodeState = node.getState();
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
private Optional<Container> removeContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(context.node(), existingContainer);
if (removeReason.isPresent()) {
context.log(logger, "Will remove container: " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (context.node().getState() == Node.State.active) {
orchestratorSuspendNode(context);
}
try {
if (context.node().getState() != Node.State.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
stopFilebeatSchedulerIfNeeded();
storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
currentRebootGeneration = context.node().getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
private void scheduleDownLoadIfNeeded(NodeSpec node) {
if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
imageBeingDownloaded = node.getWantedDockerImage().get();
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
public void converge(NodeAgentContext context) {
try {
doConverge(context);
} catch (OrchestratorException | ConvergenceException e) {
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Throwable e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
}
}
void doConverge(NodeAgentContext context) {
NodeSpec node = context.node();
Optional<Container> container = getContainer(context);
if (!node.equals(lastNode)) {
logChangesToNodeSpec(context, lastNode, node);
if (currentRebootGeneration < node.getCurrentRebootGeneration())
currentRebootGeneration = node.getCurrentRebootGeneration();
if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
currentRestartGeneration = node.getCurrentRestartGeneration();
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, container);
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node);
if (isDownloadingImage()) {
context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(context, container);
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
if (! container.isPresent()) {
containerState = STARTING;
startContainer(context);
containerState = UNKNOWN;
aclMaintainer.ifPresent(AclMaintainer::converge);
} else {
updateContainerIfNeeded(context, container.get());
}
startServicesIfNeeded(context);
resumeNodeIfNeeded(context);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
updateNodeRepoWithCurrentAttributes(context);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context);
break;
case provisioned:
nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
break;
case dirty:
removeContainerIfNeededUpdateContainerState(context, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(context);
nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
}
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
StringBuilder builder = new StringBuilder();
appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
if (builder.length() > 0) {
context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
}
}
private static <T> String fieldDescription(T value) {
return value == null ? "[absent]" : value.toString();
}
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
T oldValue = oldNode == null ? null : getter.apply(oldNode);
T newValue = getter.apply(newNode);
if (!Objects.equals(oldValue, newValue)) {
if (builder.length() > 0) {
builder.append(", ");
}
builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
}
}
private void stopFilebeatSchedulerIfNeeded() {
if (currentFilebeatRestarter.isPresent()) {
currentFilebeatRestarter.get().cancel(true);
currentFilebeatRestarter = Optional.empty();
}
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
if (containerState != UNKNOWN) return;
final NodeAgentContext context = contextSupplier.currentContext();
final NodeSpec node = context.node();
Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
.add("state", node.getState().toString());
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.getAllowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("mem_total.used", memoryTotalBytesUsage)
.withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(context, metrics);
}
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
    // Serialize every metric into one secret-agent report and hand it to the RPC
    // server on localhost:19091 inside the container via vespa-rpc-invoke.
    try {
        StringBuilder report = new StringBuilder("s:");
        for (DimensionMetrics metric : metrics) {
            report.append(metric.toSecretAgentReport());
        }
        dockerOperations.executeCommandInContainerAsRoot(context, 5L,
                "vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", report.toString());
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        // Best effort: failing to push metrics must not take down the agent.
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
private Optional<Container> getContainer(NodeAgentContext context) {
    // A container marked ABSENT cannot reappear on its own, so skip the docker lookup.
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> current = dockerOperations.getContainer(context);
    if (current.isPresent()) return current;
    containerState = ABSENT;  // cache the absence so later ticks short-circuit
    return Optional.empty();
}
@Override
public boolean isDownloadingImage() {
    // A pull is in flight exactly while a target image has been recorded.
    return Objects.nonNull(imageBeingDownloaded);
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    // Read-then-reset; NOTE(review): not atomic — assumes single-threaded access, confirm callers.
    final int unhandled = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandled;
}
/** Tracks cumulative CPU counters between samples and derives usage ratios from the deltas. */
class CpuUsageReporter {
    // Cumulative counters from the previous sample.
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;
    // Differences between the two most recent samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // The very first sample has no predecessor: keep the system delta at zero so
        // the ratio getters return NaN instead of a bogus value.
        boolean firstSample = this.totalSystemUsage == 0;
        deltaSystemUsage = firstSample ? 0 : totalSystemUsage - this.totalSystemUsage;
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * CPU time used by the container divided by CPU time used by the entire system
     * between the last two updateCpuDeltas() calls; NaN until two samples exist.
     */
    double getCpuUsageRatio() {
        return ratioOf(deltaContainerUsage);
    }

    /** Same as getCpuUsageRatio(), but counting kernel-mode CPU time only. */
    double getCpuKernelUsageRatio() {
        return ratioOf(deltaContainerKernelUsage);
    }

    private double ratioOf(long delta) {
        return deltaSystemUsage == 0 ? Double.NaN : (double) delta / deltaSystemUsage;
    }
}
// Asks the Orchestrator for permission to suspend this node; presumably throws
// OrchestratorException when permission is denied (converge() catches that type).
// NOTE(review): unlike the other variant of this method in this file, this version
// does not check that the node is active first — confirm that is intended.
private void orchestratorSuspendNode(NodeAgentContext context) {
    context.log(logger, "Ask Orchestrator for permission to suspend node");
    orchestrator.suspend(context.hostname().value());
}
protected ContainerData createContainerData(NodeAgentContext context) {
    // Hook for subclasses to pre-populate files inside the container;
    // the base implementation rejects every write.
    return (containerPath, fileContent) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
// Set exactly once by stop(); polled by the converge loop thread.
private final AtomicBoolean terminated = new AtomicBoolean(false);
// Whether the optional node "resume" program has run for the current container.
private boolean hasResumedNode = false;
// Whether services inside the container are believed to be running.
private boolean hasStartedServices = true;
// Collaborators injected through the constructor.
private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
// Feature flag used by updateContainerIfNeeded to cap container CPU.
private final DoubleFlag containerCpuCap;
// Exposed (and reset) via getAndResetNumberOfUnhandledExceptions() for monitoring.
private int numberOfUnhandledException = 0;
// Non-null while an async docker image pull is in flight (see scheduleDownLoadIfNeeded).
private DockerImage imageBeingDownloaded = null;
// Generations this agent has acted on; reported back to the node repo.
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
// Runs converge() repeatedly until `terminated` is set.
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
        Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
// Handle for the scheduled daily filebeat restart, present while scheduled.
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
    ABSENT,
    STARTING,
    UNKNOWN
}
private ContainerState containerState = UNKNOWN;
// Last node spec observed, used to detect and log spec changes in doConverge().
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Wires up the agent and prepares (but does not start) the converge loop thread.
 * Call start() to begin converging.
 */
public NodeAgentImpl(
        final NodeAgentContextSupplier contextSupplier,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final StorageMaintainer storageMaintainer,
        final FlagSource flagSource,
        final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
        final Optional<AclMaintainer> aclMaintainer,
        final Optional<HealthChecker> healthChecker) {
    this.contextSupplier = contextSupplier;
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
    this.aclMaintainer = aclMaintainer;
    this.healthChecker = healthChecker;
    // Resolve the container CPU cap flag for this specific host.
    this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
            .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
    // Converge loop: block for the next context, converge, repeat until stop().
    this.loopThread = new Thread(() -> {
        while (!terminated.get()) {
            try {
                NodeAgentContext context = contextSupplier.nextContext();
                converge(context);
            } catch (InterruptedException ignored) { }
        }
    });
    this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
    // Restarts a named service inside the container; failures are logged, never thrown.
    this.serviceRestarter = service -> {
        NodeAgentContext context = contextSupplier.currentContext();
        try {
            ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                    context, "service", service, "restart");
            if (!processResult.isSuccess()) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
            }
        } catch (Exception e) {
            context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
        }
    };
}
@Override
public void start() {
    // Begin the converge loop; it runs until stop() flips `terminated`.
    loopThread.start();
}
// Stops the agent and blocks until both the converge loop thread and the filebeat
// restart scheduler have fully terminated. May only be called once.
@Override
public void stop() {
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    filebeatRestarter.shutdown();
    contextSupplier.interrupt();  // wakes the loop thread out of nextContext()
    // Retry the joins even if this thread is interrupted: we must not return
    // before both workers are completely stopped.
    do {
        try {
            loopThread.join();
            filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException ignored) { }
    } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
    contextSupplier.currentContext().log(logger, "Stopped");
}
void startServicesIfNeeded(NodeAgentContext context) {
    // Idempotent: only act when services are not known to be running.
    if (hasStartedServices) return;
    context.log(logger, "Starting services");
    dockerOperations.startServices(context);
    hasStartedServices = true;
}
void resumeNodeIfNeeded(NodeAgentContext context) {
    // Idempotent: only act when the node has not been resumed yet.
    if (hasResumedNode) return;
    if (!currentFilebeatRestarter.isPresent()) {
        // First resume for this container: write metrics config and schedule the
        // daily filebeat restart.
        storageMaintainer.writeMetricsConfig(context);
        currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
    }
    context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
    dockerOperations.resumeNode(context);
    hasResumedNode = true;
}
// Computes which node attributes (restart/reboot generation, docker image) have
// drifted from what the node repo has, and publishes the new values if anything changed.
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
    final NodeAttributes currentNodeAttributes = new NodeAttributes();
    final NodeAttributes newNodeAttributes = new NodeAttributes();
    if (context.node().getWantedRestartGeneration().isPresent() &&
            !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
        currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
        newNodeAttributes.withRestartGeneration(currentRestartGeneration);
    }
    if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
        currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
        newNodeAttributes.withRebootGeneration(currentRebootGeneration);
    }
    // Only report the wanted image while the container may actually be running (UNKNOWN).
    Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
    if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
        currentNodeAttributes.withDockerImage(context.node().getCurrentDockerImage().orElse(new DockerImage("")));
        newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
    }
    publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    // Avoid spamming the node repo: only write when something actually changed.
    if (currentAttributes.equals(newAttributes)) return;
    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
            currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
// Creates and starts the container, then resets per-container bookkeeping.
private void startContainer(NodeAgentContext context) {
    ContainerData containerData = createContainerData(context);
    dockerOperations.createContainer(context, containerData);
    dockerOperations.startContainer(context);
    lastCpuMetric = new CpuUsageReporter();  // fresh container => fresh CPU baseline
    hasStartedServices = true;  // starting the container starts its services
    hasResumedNode = false;     // but the node must still be resumed
    context.log(logger, "Container successfully started, new containerState is " + containerState);
}
// Removes the container when required; when it survives, restarts its services if the
// wanted restart generation has been bumped. Returns the container iff it still exists.
private Optional<Container> removeContainerIfNeededUpdateContainerState(
        NodeAgentContext context, Optional<Container> existingContainer) {
    return existingContainer
            .flatMap(container -> removeContainerIfNeeded(context, container))
            .map(container -> {
                shouldRestartServices(context.node()).ifPresent(restartReason -> {
                    context.log(logger, "Will restart services: " + restartReason);
                    restartServices(context, container);
                    // Record that we have acted on this restart generation.
                    currentRestartGeneration = context.node().getWantedRestartGeneration();
                });
                return container;
            });
}
/**
 * Returns the reason services should be restarted, or empty if no restart is needed.
 * A restart is needed when the node repo's wanted restart generation is ahead of the
 * generation this agent has last acted on.
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    Optional<Long> wantedGeneration = node.getWantedRestartGeneration();
    if (!wantedGeneration.isPresent()) return Optional.empty();
    // Fix: the previous version called currentRestartGeneration.get() unchecked,
    // throwing NoSuchElementException if a wanted generation appeared before any
    // current generation was observed. Treat a missing current generation as
    // "behind wanted" instead.
    if (!currentRestartGeneration.isPresent() || currentRestartGeneration.get() < wantedGeneration.get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.map(String::valueOf).orElse("[absent]") + " -> " + wantedGeneration.get());
    }
    return Optional.empty();
}
private void restartServices(NodeAgentContext context, Container existingContainer) {
    // Restart only makes sense for a live container on an active node.
    boolean containerIsUp = existingContainer.state.isRunning();
    if (containerIsUp && context.node().getState() == Node.State.active) {
        context.log(logger, "Restarting services");
        orchestratorSuspendNode(context);
        dockerOperations.restartVespa(context);
    }
}
@Override
public void stopServices() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Stopping services");
    if (containerState == ABSENT) return;
    try {
        // Both flags reset: services are down and the node will need a resume.
        hasStartedServices = false;
        hasResumedNode = false;
        dockerOperations.stopServices(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    }
}
// Suspends services in the container. Best effort: RuntimeExceptions are logged
// and swallowed so a bad container cannot kill the agent.
@Override
public void suspend() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Suspending services on node");
    if (containerState == ABSENT) return;
    try {
        hasResumedNode = false;  // force a resume on the next active tick
        dockerOperations.suspendNode(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    } catch (RuntimeException e) {
        // It's bad to continue as-if nothing happened, but on the other hand if
        // the container is in a bad state there is not much else we can do.
        context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
    }
}
// Returns the reason the existing container must be removed (and possibly recreated
// later), or empty if it can keep running.
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
    final Node.State nodeState = node.getState();
    // dirty/provisioned nodes must not run a container at all.
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    // Image upgrades require a container replacement.
    if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    // A bumped reboot generation is satisfied by recreating the container.
    if (currentRebootGeneration < node.getWantedRebootGeneration()) {
        return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, node.getWantedRebootGeneration()));
    }
    // STARTING here means a previous start attempt never completed.
    if (containerState == STARTING) return Optional.of("Container failed to start");
    return Optional.empty();
}
// Removes the container when shouldRemoveContainer() demands it, after suspending the
// node and stopping services as gracefully as possible. Returns the container if kept,
// empty after removal.
private Optional<Container> removeContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(context.node(), existingContainer);
    if (removeReason.isPresent()) {
        context.log(logger, "Will remove container: " + removeReason.get());
        if (existingContainer.state.isRunning()) {
            orchestratorSuspendNode(context);
            try {
                if (context.node().getState() != Node.State.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                // Best effort: removal proceeds even if graceful shutdown fails.
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }
        stopFilebeatSchedulerIfNeeded();
        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        // Recreating the container is how a wanted reboot is carried out.
        currentRebootGeneration = context.node().getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
// Starts an async pull of the wanted image when it differs from the current one,
// and tracks which image (if any) is being downloaded.
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
    // NOTE(review): assumes the wanted image is present whenever it differs from the
    // current one — getWantedDockerImage().get() would throw otherwise. Confirm callers.
    DockerImage wantedImage = node.getWantedDockerImage().get();
    if (dockerOperations.pullImageAsyncIfNeeded(wantedImage)) {
        imageBeingDownloaded = wantedImage;
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null;
    }
}
// Runs one converge tick, mapping known failure modes to log statements so the loop
// keeps running; unexpected exceptions are counted for monitoring.
public void converge(NodeAgentContext context) {
    try {
        doConverge(context);
    } catch (OrchestratorException | ConvergenceException e) {
        // Expected, transient (e.g. Orchestrator denied suspend) — retry next tick.
        context.log(logger, e.getMessage());
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
        context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
    } catch (DockerException e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
    } catch (Throwable e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
    }
}
// One converge tick: drive the container and node toward the state the node repo wants.
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);
        // Catch our local generation counters up to what the node repo reports.
        if (currentRebootGeneration < node.getCurrentRebootGeneration())
            currentRebootGeneration = node.getCurrentRebootGeneration();
        if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
                currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
            currentRestartGeneration = node.getCurrentRestartGeneration();
        // Refresh metrics config whenever the spec changes while the container runs.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context);
        }
        lastNode = node;
    }
    switch (node.getState()) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should be running in these states.
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, container);
            // Trigger cleanup when the disk is at least 80% full.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
            scheduleDownLoadIfNeeded(node);
            if (isDownloadingImage()) {
                // Nothing more to do this tick until the image has arrived.
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(context, container);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            if (! container.isPresent()) {
                containerState = STARTING;
                startContainer(context);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            } else {
                updateContainerIfNeeded(context, container.get());
            }
            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));
            updateNodeRepoWithCurrentAttributes(context);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    // Collect field-level differences; log only if there are any.
    StringBuilder diff = new StringBuilder();
    appendIfDifferent(diff, "state", lastNode, node, NodeSpec::getState);
    if (diff.length() > 0) {
        context.log(logger, LogLevel.INFO, "Changes to node: " + diff);
    }
}
/** Renders a value for log output, using "[absent]" for null. */
private static <T> String fieldDescription(T value) {
    return Objects.toString(value, "[absent]");
}
/** Appends "name old -> new" (comma-separated with earlier entries) when the values differ. */
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T before = oldNode == null ? null : getter.apply(oldNode);
    T after = getter.apply(newNode);
    if (Objects.equals(before, after)) return;
    if (builder.length() > 0) {
        builder.append(", ");
    }
    builder.append(name).append(" ").append(fieldDescription(before)).append(" -> ").append(fieldDescription(after));
}
private void stopFilebeatSchedulerIfNeeded() {
    // Cancel any pending filebeat restart and drop the handle.
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
// Samples docker stats for the container and pushes derived system and per-interface
// network metrics into the container's metrics pipeline.
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    // Only sample while a container may be running.
    if (containerState != UNKNOWN) return;
    final NodeAgentContext context = contextSupplier.currentContext();
    final NodeSpec node = context.node();
    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;
    // Common dimensions attached to every metric from this node.
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();
    ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // Extract the raw counters from docker's loosely typed stats maps.
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
    // Scale CPU usage so 100% means "all of this node's allocated cores are busy".
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
    // "used" excludes the page cache; "total used" includes it.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("mem_total.used", memoryTotalBytesUsage)
            .withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());
    // One metrics set per network interface, tagged with the interface name.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });
    pushMetricsToContainer(context, metrics);
}
// Serializes the metrics into a single secret-agent report and injects it into the
// container via vespa-rpc-invoke against the RPC server on localhost:19091.
// Failures are logged only — metrics pushing is best effort.
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
    StringBuilder params = new StringBuilder();
    try {
        for (DimensionMetrics dimensionMetrics : metrics) {
            params.append(dimensionMetrics.toSecretAgentReport());
        }
        String wrappedMetrics = "s:" + params.toString();
        // "-t 2" is the RPC timeout; 5L is the overall exec timeout in the container.
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
// Looks up the container, short-circuiting via containerState: a container marked
// ABSENT cannot reappear without this agent starting it, and a missing container is
// remembered as ABSENT.
private Optional<Container> getContainer(NodeAgentContext context) {
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> container = dockerOperations.getContainer(context);
    if (! container.isPresent()) containerState = ABSENT;
    return container;
}
// True while an async docker image pull is in flight (see scheduleDownLoadIfNeeded).
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
// Returns the number of unhandled exceptions since the last call, then resets the counter.
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int temp = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return temp;
}
/** Keeps cumulative CPU counters between samples and derives usage ratios from the deltas. */
class CpuUsageReporter {
    // Cumulative counters from the previous sample.
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;
    // Differences between the two most recent samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;
    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no predecessor: keep the system delta at 0 so the ratio
        // getters report NaN instead of a misleading value.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }
    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }
    /** Like getCpuUsageRatio(), but counting kernel-mode CPU time only. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
private void orchestratorSuspendNode(NodeAgentContext context) {
    // Only ask the Orchestrator for permission when the node is active; for any
    // other state the suspend can proceed without coordination.
    if (context.node().getState() == Node.State.active) {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }
}
// Hook for subclasses to pre-populate files inside the container; the default
// implementation rejects every write.
protected ContainerData createContainerData(NodeAgentContext context) {
    return (pathInContainer, data) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} |
True, moved this check to `orchestratorSuspendNode()` in case we sometime add the update to any other state. | private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
double cpuCap = context.node().getOwner()
.map(NodeSpec.Owner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
.value();
ContainerResources wantedContainerResources = ContainerResources.from(
cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
if (wantedContainerResources.equals(existingContainer.resources)) return;
context.log(logger, "Container should be running with different resource allocation, wanted: %s, current: %s",
wantedContainerResources, existingContainer.resources);
if (context.node().getState() == Node.State.active) {
orchestratorSuspendNode(context);
}
dockerOperations.updateContainer(context, wantedContainerResources);
} | if (context.node().getState() == Node.State.active) { | private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
double cpuCap = context.node().getOwner()
.map(NodeSpec.Owner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
.value();
ContainerResources wantedContainerResources = ContainerResources.from(
cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
if (wantedContainerResources.equals(existingContainer.resources)) return;
context.log(logger, "Container should be running with different resource allocation, wanted: %s, current: %s",
wantedContainerResources, existingContainer.resources);
orchestratorSuspendNode(context);
dockerOperations.updateContainer(context, wantedContainerResources);
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
// Set exactly once by stop(); polled by the converge loop thread.
private final AtomicBoolean terminated = new AtomicBoolean(false);
// Whether the optional node "resume" program has run for the current container.
private boolean hasResumedNode = false;
// Whether services inside the container are believed to be running.
private boolean hasStartedServices = true;
// Collaborators injected through the constructor.
private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
// Feature flag used by updateContainerIfNeeded to cap container CPU.
private final DoubleFlag containerCpuCap;
// Exposed (and reset) via getAndResetNumberOfUnhandledExceptions() for monitoring.
private int numberOfUnhandledException = 0;
// Non-null while an async docker image pull is in flight.
private DockerImage imageBeingDownloaded = null;
// Generations this agent has acted on; reported back to the node repo.
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
// Runs converge() repeatedly until `terminated` is set.
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
        Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
// Handle for the scheduled daily filebeat restart, present while scheduled.
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
    ABSENT,
    STARTING,
    UNKNOWN
}
private ContainerState containerState = UNKNOWN;
// Last node spec observed, used to detect and log spec changes.
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Wires the agent's collaborators and prepares (but does not start) the converge loop thread.
 * Call {@link #start()} to begin converging.
 */
public NodeAgentImpl(
        final NodeAgentContextSupplier contextSupplier,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final StorageMaintainer storageMaintainer,
        final FlagSource flagSource,
        final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
        final Optional<AclMaintainer> aclMaintainer,
        final Optional<HealthChecker> healthChecker) {
    this.contextSupplier = contextSupplier;
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
    this.aclMaintainer = aclMaintainer;
    this.healthChecker = healthChecker;

    // Bind the CPU-cap flag to this node's hostname so the value can be tuned per host.
    this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
            .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());

    // Converge loop: block for the next context, converge on it, repeat until stop() sets `terminated`.
    // InterruptedException is expected: stop() interrupts the supplier to unblock nextContext().
    this.loopThread = new Thread(() -> {
        while (!terminated.get()) {
            try {
                NodeAgentContext context = contextSupplier.nextContext();
                converge(context);
            } catch (InterruptedException ignored) { }
        }
    });
    this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());

    // Restarts a named service inside the container as root; failures are logged, never thrown.
    this.serviceRestarter = service -> {
        NodeAgentContext context = contextSupplier.currentContext();
        try {
            ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                    context, "service", service, "restart");

            if (!processResult.isSuccess()) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
            }
        } catch (Exception e) {
            context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
        }
    };
}
@Override
public void start() {
    // Kick off the converge loop thread created in the constructor.
    loopThread.start();
}

/**
 * Stops the agent: shuts down the filebeat scheduler, unblocks and joins the converge loop.
 * Throws if called more than once. Blocks until both the loop thread and the scheduler are done.
 */
@Override
public void stop() {
    // compareAndSet guards against a second stop() call.
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    filebeatRestarter.shutdown();
    // Interrupt a converge loop that may be blocked in contextSupplier.nextContext().
    contextSupplier.interrupt();
    // Retry join/awaitTermination until both have actually finished, even if we are interrupted.
    do {
        try {
            loopThread.join();
            filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException ignored) { }
    } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
    contextSupplier.currentContext().log(logger, "Stopped");
}
/** Starts services inside the container unless they are already marked as started. */
void startServicesIfNeeded(NodeAgentContext context) {
    // Nothing to do while services are believed to be up.
    if (hasStartedServices) return;

    context.log(logger, "Starting services");
    dockerOperations.startServices(context);
    hasStartedServices = true;
}
/**
 * Runs the optional node resume program once per suspend/resume cycle, and lazily (re)arms
 * the daily filebeat restart task the first time it runs after a container (re)start.
 */
void resumeNodeIfNeeded(NodeAgentContext context) {
    if (!hasResumedNode) {
        if (!currentFilebeatRestarter.isPresent()) {
            storageMaintainer.writeMetricsConfig(context);
            currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                    () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
        }
        context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
        dockerOperations.resumeNode(context);
        // Only mark resumed after resumeNode() returns; an exception leaves this false for retry.
        hasResumedNode = true;
    }
}
/**
 * Compares the node repo's view of this node (restart/reboot generation, docker image) with
 * the locally tracked state and publishes an update when they differ.
 */
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
    final NodeAttributes currentNodeAttributes = new NodeAttributes();
    final NodeAttributes newNodeAttributes = new NodeAttributes();

    // Restart generation is only reported when a restart has been requested at some point.
    if (context.node().getWantedRestartGeneration().isPresent() &&
            !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
        currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
        newNodeAttributes.withRestartGeneration(currentRestartGeneration);
    }

    if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
        currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
        newNodeAttributes.withRebootGeneration(currentRebootGeneration);
    }

    // Only claim the wanted image as current while containerState is UNKNOWN, i.e. after a
    // container was (believed to be) started with that image.
    Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
    if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
        currentNodeAttributes.withDockerImage(context.node().getCurrentDockerImage().orElse(new DockerImage("")));
        newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
    }

    publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
/** Pushes the new attributes to the node repo, but only when they differ from the current ones. */
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    // Identical attributes: avoid a redundant node-repo round trip.
    if (currentAttributes.equals(newAttributes)) return;

    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
            currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
/** Creates and starts the container for this node and resets per-container bookkeeping. */
private void startContainer(NodeAgentContext context) {
    ContainerData containerData = createContainerData(context);
    dockerOperations.createContainer(context, containerData);
    dockerOperations.startContainer(context);
    // Fresh container: reset the CPU-usage baseline.
    lastCpuMetric = new CpuUsageReporter();

    // Services are expected to come up with the container, but the resume program must run again.
    hasStartedServices = true;
    hasResumedNode = false;
    context.log(logger, "Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the container if it should no longer run; when it is kept, performs any pending
 * service restart and records the wanted restart generation as achieved.
 *
 * @return the container if it is kept, otherwise empty
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(
        NodeAgentContext context, Optional<Container> existingContainer) {
    return existingContainer
            .flatMap(container -> removeContainerIfNeeded(context, container))
            .map(container -> {
                // Container kept: honor a pending restart request, then record the generation.
                shouldRestartServices(context.node()).ifPresent(restartReason -> {
                    context.log(logger, "Will restart services: " + restartReason);
                    restartServices(context, container);
                    currentRestartGeneration = context.node().getWantedRestartGeneration();
                });
                return container;
            });
}
/**
 * Returns the reason services should be restarted, or empty if no restart is needed.
 * A restart is needed when a restart generation is wanted and our current generation is
 * behind it (or absent).
 */
private Optional<String> shouldRestartServices(NodeSpec node) {
    Optional<Long> wantedRestartGeneration = node.getWantedRestartGeneration();
    if (!wantedRestartGeneration.isPresent()) return Optional.empty();

    // NOTE(review): the original called currentRestartGeneration.get() unconditionally, which
    // throws NoSuchElementException when a wanted generation exists but we have never observed
    // a current one. Treat that case as "restart needed" — TODO confirm intended semantics.
    if (!currentRestartGeneration.isPresent() ||
            currentRestartGeneration.get() < wantedRestartGeneration.get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.orElse(null) + " -> " + wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/** Restarts Vespa inside the container, but only for a running container on an active node. */
private void restartServices(NodeAgentContext context, Container existingContainer) {
    if (!existingContainer.state.isRunning()) return;
    if (context.node().getState() != Node.State.active) return;

    context.log(logger, "Restarting services");
    // Suspension must be granted by the Orchestrator before services go down.
    orchestratorSuspendNode(context);
    dockerOperations.restartVespa(context);
}
/** Stops services inside the container; no-op if the container is known to be absent. */
@Override
public void stopServices() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Stopping services");
    if (containerState == ABSENT) return;
    try {
        // Services must be started, and the node resumed, again after this.
        hasStartedServices = hasResumedNode = false;
        dockerOperations.stopServices(context);
    } catch (ContainerNotFoundException e) {
        // Container disappeared under us; remember that so future calls short-circuit.
        containerState = ABSENT;
    }
}
/** Suspends services on the node; no-op if the container is known to be absent. */
@Override
public void suspend() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Suspending services on node");
    if (containerState == ABSENT) return;
    try {
        // After suspension the resume program must run again before the node serves.
        hasResumedNode = false;
        dockerOperations.suspendNode(context);
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
    } catch (RuntimeException e) {
        // Best effort: a failed suspend is logged as a warning and otherwise ignored.
        context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
    }
}
/**
 * Returns the reason the existing container must be removed, or empty if it may keep running.
 * Checks are evaluated in order; the first matching reason wins.
 */
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
    final Node.State state = node.getState();
    if (state == Node.State.dirty || state == Node.State.provisioned) {
        return Optional.of("Node in state " + state + ", container should no longer be running");
    }

    final Optional<DockerImage> wantedImage = node.getWantedDockerImage();
    if (wantedImage.isPresent() && !wantedImage.get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer.image.asString() + " -> " + wantedImage.get().asString());
    }

    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }

    if (currentRebootGeneration < node.getWantedRebootGeneration()) {
        return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, node.getWantedRebootGeneration()));
    }

    // STARTING here means a previous start attempt never completed.
    if (containerState == STARTING) return Optional.of("Container failed to start");
    return Optional.empty();
}
/**
 * Removes the container when {@link #shouldRemoveContainer} gives a reason, after first
 * suspending and stopping services (best effort). Returns the container if it was kept.
 */
private Optional<Container> removeContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(context.node(), existingContainer);
    if (removeReason.isPresent()) {
        context.log(logger, "Will remove container: " + removeReason.get());

        if (existingContainer.state.isRunning()) {
            // Only ask the Orchestrator for permission when the node is active.
            if (context.node().getState() == Node.State.active) {
                orchestratorSuspendNode(context);
            }
            try {
                // Dirty nodes are being torn down; skip the suspend step for them.
                if (context.node().getState() != Node.State.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                // Best effort: removal proceeds even if stopping services failed.
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }
        stopFilebeatSchedulerIfNeeded();
        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        // A removal counts as having performed any wanted reboot.
        currentRebootGeneration = context.node().getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Kicks off an async pull of the wanted docker image when it differs from the current one,
 * tracking the in-flight download in {@code imageBeingDownloaded}.
 */
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    Optional<DockerImage> wantedImage = node.getWantedDockerImage();
    // Already on the wanted image (or both absent): nothing to download.
    if (node.getCurrentDockerImage().equals(wantedImage)) return;

    // The original called wantedImage.get() unconditionally and would throw
    // NoSuchElementException when no image is wanted while a current image is set.
    if (!wantedImage.isPresent()) {
        imageBeingDownloaded = null;
        return;
    }

    // pullImageAsyncIfNeeded() returning true means a pull is (still) in progress.
    imageBeingDownloaded = dockerOperations.pullImageAsyncIfNeeded(wantedImage.get())
            ? wantedImage.get()
            : null;
}
/**
 * Runs one convergence tick, translating failures into logging and state bookkeeping so the
 * converge loop itself never dies.
 */
public void converge(NodeAgentContext context) {
    try {
        doConverge(context);
    } catch (OrchestratorException | ConvergenceException e) {
        // Expected transient conditions: log the message only and retry next tick.
        context.log(logger, e.getMessage());
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
        context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
    } catch (DockerException e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
    } catch (Throwable e) {
        // Catch-all keeps the loop alive; failures are counted for monitoring.
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
    }
}
/**
 * One convergence step: syncs local bookkeeping with the node spec, then drives the container
 * towards the state the node repo wants for this node.
 */
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);
    // Spec changed since last tick: log the diff and catch local generation counters up.
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);

        // Generation counters only move forward, towards the node repo's view.
        if (currentRebootGeneration < node.getCurrentRebootGeneration())
            currentRebootGeneration = node.getCurrentRebootGeneration();

        if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
                currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
            currentRestartGeneration = node.getCurrentRestartGeneration();

        // Refresh metrics config whenever the spec changes while the container runs.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context);
        }

        lastNode = node;
    }

    switch (node.getState()) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // Non-running states: make sure no container is left behind, then report attributes.
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, container);

            // Clean up old files once disk utilization reaches 80% of the allocation.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

            scheduleDownLoadIfNeeded(node);
            // Defer (re)starting the container until any image download has finished.
            if (isDownloadingImage()) {
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(context, container);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            if (! container.isPresent()) {
                // STARTING marks the attempt; only a successful start moves us back to UNKNOWN.
                containerState = STARTING;
                startContainer(context);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            } else {
                updateContainerIfNeeded(context, container.get());
            }

            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));

            // Report attributes to the node repo, then tell the Orchestrator the node can resume.
            updateNodeRepoWithCurrentAttributes(context);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            // Tear down, archive storage and hand the node back as ready.
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
/** Logs a human-readable diff between the previous and the current node spec, if any. */
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    // Currently only the "state" field is diffed.
    StringBuilder changes = new StringBuilder();
    appendIfDifferent(changes, "state", lastNode, node, NodeSpec::getState);

    if (changes.length() == 0) return;
    context.log(logger, LogLevel.INFO, "Changes to node: " + changes);
}
/** Null-safe description of a field value; null renders as "[absent]". */
private static <T> String fieldDescription(T value) {
    // Objects.toString(value, default) is the stdlib idiom for null-safe toString.
    return Objects.toString(value, "[absent]");
}
/**
 * Appends "name old -> new" to the builder when the field extracted by {@code getter} differs
 * between the old and the new node spec. A null oldNode (first observation) reads as absent.
 */
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T previous = oldNode == null ? null : getter.apply(oldNode);
    T current = getter.apply(newNode);

    if (Objects.equals(previous, current)) return;

    // Separate multiple diffs with ", ".
    if (builder.length() > 0) builder.append(", ");
    builder.append(name).append(" ").append(fieldDescription(previous)).append(" -> ").append(fieldDescription(current));
}
/** Cancels the scheduled filebeat restart task, if one is armed, interrupting it if running. */
private void stopFilebeatSchedulerIfNeeded() {
    currentFilebeatRestarter.ifPresent(restarter -> restarter.cancel(true));
    currentFilebeatRestarter = Optional.empty();
}
/**
 * Samples docker container stats and pushes CPU, memory, disk and network metrics into the
 * container. No-op unless a container is believed to be running (containerState == UNKNOWN).
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    if (containerState != UNKNOWN) return;
    final NodeAgentContext context = contextSupplier.currentContext();
    final NodeSpec node = context.node();

    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;

    // Dimensions attached to every metric emitted below.
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();

    ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // The container stats come back as untyped nested maps; the casts below mirror the
    // docker stats JSON layout — TODO confirm against the docker API version in use.
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

    // Scale CPU so 100% means "all of this node's allocated cores", not all host cores.
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

    // mem.used excludes the page cache; mem_total.used includes it.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("mem_total.used", memoryTotalBytesUsage)
            .withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    // Disk usage is best-effort; only emit when a measurement exists.
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());

    // One metric set per network interface, with the interface name as an extra dimension.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });

    pushMetricsToContainer(context, metrics);
}
/**
 * Serializes the metrics and pushes them into the container by invoking setExtraMetrics on the
 * RPC service at localhost:19091 inside the container. Failures are logged, not thrown.
 */
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
    StringBuilder params = new StringBuilder();
    try {
        // Concatenate each metric's secret-agent report into a single payload.
        for (DimensionMetrics dimensionMetrics : metrics) {
            params.append(dimensionMetrics.toSecretAgentReport());
        }
        // "s:" prefix marks the argument as a string for vespa-rpc-invoke.
        String wrappedMetrics = "s:" + params.toString();

        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
/**
 * Returns the node's container as reported by docker, updating containerState to ABSENT when
 * the container cannot be found. Skips the docker lookup entirely while state is ABSENT.
 */
private Optional<Container> getContainer(NodeAgentContext context) {
    // ABSENT is authoritative: a container we removed will not reappear on its own.
    if (containerState == ABSENT) return Optional.empty();

    Optional<Container> current = dockerOperations.getContainer(context);
    if (current.isPresent()) return current;

    // Docker no longer knows the container; remember that so the next call short-circuits.
    containerState = ABSENT;
    return current;
}
@Override
public boolean isDownloadingImage() {
    // A download is in flight exactly while imageBeingDownloaded is set.
    return imageBeingDownloaded != null;
}

@Override
public int getAndResetNumberOfUnhandledExceptions() {
    // Read-then-clear; not atomic — assumes callers serialize access. TODO confirm threading.
    int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
/**
 * Tracks CPU-time counters between samples and exposes the container's share of system CPU
 * time over the last sampling interval.
 */
class CpuUsageReporter {
    // Totals from the previous sample; 0 until the first update.
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample has no baseline: force a zero system delta so the ratios report NaN.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Like {@link #getCpuUsageRatio()}, but counting only kernel-mode CPU time. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/** Asks the Orchestrator for permission to suspend this node; throws if permission is denied. */
private void orchestratorSuspendNode(NodeAgentContext context) {
    context.log(logger, "Ask Orchestrator for permission to suspend node");
    orchestrator.suspend(context.hostname().value());
}

/**
 * Returns the data to write into a newly created container. The default implementation
 * supports no files; subclasses may override to provision container files.
 */
protected ContainerData createContainerData(NodeAgentContext context) {
    return (pathInContainer, data) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private final DoubleFlag containerCpuCap;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final NodeAgentContextSupplier contextSupplier,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final FlagSource flagSource,
final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.contextSupplier = contextSupplier;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
.with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
this.loopThread = new Thread(() -> {
while (!terminated.get()) {
try {
NodeAgentContext context = contextSupplier.nextContext();
converge(context);
} catch (InterruptedException ignored) { }
}
});
this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
this.serviceRestarter = service -> {
NodeAgentContext context = contextSupplier.currentContext();
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
context, "service", service, "restart");
if (!processResult.isSuccess()) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
}
};
}
@Override
public void start() {
loopThread.start();
}
@Override
public void stop() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
filebeatRestarter.shutdown();
contextSupplier.interrupt();
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException ignored) { }
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
contextSupplier.currentContext().log(logger, "Stopped");
}
void startServicesIfNeeded(NodeAgentContext context) {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
void resumeNodeIfNeeded(NodeAgentContext context) {
if (!hasResumedNode) {
if (!currentFilebeatRestarter.isPresent()) {
storageMaintainer.writeMetricsConfig(context);
currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
}
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
if (context.node().getWantedRestartGeneration().isPresent() &&
!Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
currentNodeAttributes.withDockerImage(context.node().getCurrentDockerImage().orElse(new DockerImage("")));
newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
}
publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
if (!currentAttributes.equals(newAttributes)) {
context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
currentAttributes, newAttributes);
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
}
private void startContainer(NodeAgentContext context) {
ContainerData containerData = createContainerData(context);
dockerOperations.createContainer(context, containerData);
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(
NodeAgentContext context, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(context, container))
.map(container -> {
shouldRestartServices(context.node()).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(context, container);
currentRestartGeneration = context.node().getWantedRestartGeneration();
});
return container;
});
}
private Optional<String> shouldRestartServices(NodeSpec node) {
if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
}
return Optional.empty();
}
private void restartServices(NodeAgentContext context, Container existingContainer) {
if (existingContainer.state.isRunning() && context.node().getState() == Node.State.active) {
context.log(logger, "Restarting services");
orchestratorSuspendNode(context);
dockerOperations.restartVespa(context);
}
}
@Override
public void stopServices() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
@Override
public void suspend() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final Node.State nodeState = node.getState();
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
/**
 * Removes the container when {@link #shouldRemoveContainer} gives a reason, after first
 * suspending and stopping services (best effort). Returns the container if it was kept.
 */
private Optional<Container> removeContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(context.node(), existingContainer);
    if (removeReason.isPresent()) {
        context.log(logger, "Will remove container: " + removeReason.get());

        if (existingContainer.state.isRunning()) {
            // Only ask the Orchestrator for permission when the node is active; the original
            // suspended unconditionally, and a denied/failing suspension (OrchestratorException)
            // aborts the whole convergence in converge(), blocking removal of containers on
            // non-active nodes. This matches the sibling implementation of this method in
            // this file.
            if (context.node().getState() == Node.State.active) {
                orchestratorSuspendNode(context);
            }
            try {
                // Dirty nodes are being torn down; skip the suspend step for them.
                if (context.node().getState() != Node.State.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                // Best effort: removal proceeds even if stopping services failed.
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }
        stopFilebeatSchedulerIfNeeded();
        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        // A removal counts as having performed any wanted reboot.
        currentRebootGeneration = context.node().getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Starts an asynchronous pull of the wanted Docker image if it differs from the current one,
 * tracking the in-flight image in imageBeingDownloaded.
 * NOTE(review): calls getWantedDockerImage().get() without an isPresent() check — assumes the
 * wanted image is always present whenever it differs from the current image; confirm callers
 * guarantee this.
 */
private void scheduleDownLoadIfNeeded(NodeSpec node) {
    if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

    if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
        imageBeingDownloaded = node.getWantedDockerImage().get();
    } else if (imageBeingDownloaded != null) {
        // Pull finished (or was unnecessary): clear the in-flight marker
        imageBeingDownloaded = null;
    }
}
/**
 * Runs one convergence pass, translating known failure modes into log entries instead of
 * propagating them. Unexpected exceptions increment the unhandled-exception counter.
 */
public void converge(NodeAgentContext context) {
    try {
        doConverge(context);
    } catch (OrchestratorException | ConvergenceException e) {
        // Expected, transient conditions: log the message only, no stack trace
        context.log(logger, e.getMessage());
    } catch (ContainerNotFoundException e) {
        // Container disappeared underneath us; next pass will recreate it
        containerState = ABSENT;
        context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
    } catch (DockerException e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
    } catch (Throwable e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
    }
}
/**
 * Performs a single convergence step for this node: refreshes locally cached generation counters
 * when the node spec changes, then acts according to the node's state in the node repository.
 */
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);

        // Catch up with a reboot generation bumped from outside (or first-seen spec)
        if (currentRebootGeneration < node.getCurrentRebootGeneration())
            currentRebootGeneration = node.getCurrentRebootGeneration();

        // Allocation status changed (restart generation only exists for allocated nodes),
        // or the restart generation was incremented from outside
        if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
                currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
            currentRestartGeneration = node.getCurrentRestartGeneration();

        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context);
        }

        lastNode = node;
    }

    switch (node.getState()) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states; remove any leftover one
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, container);

            // Clean up old files once disk utilization crosses 80% of the allocated disk
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

            scheduleDownLoadIfNeeded(node);
            if (isDownloadingImage()) {
                // Wait for the image pull to finish before (re)creating the container
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(context, container);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            if (! container.isPresent()) {
                // STARTING marks the window in which a crash means "container failed to start"
                // (see shouldRemoveContainer)
                containerState = STARTING;
                startContainer(context);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            } else {
                updateContainerIfNeeded(context, container.get());
            }

            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));

            updateNodeRepoWithCurrentAttributes(context);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case provisioned:
            // Provisioned nodes are recycled straight to dirty for cleanup
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
/** Logs the difference between the previously observed node spec and the current one, if any. */
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    StringBuilder changes = new StringBuilder();
    appendIfDifferent(changes, "state", lastNode, node, NodeSpec::getState);
    if (changes.length() > 0)
        context.log(logger, LogLevel.INFO, "Changes to node: " + changes.toString());
}
/** Renders a possibly-null field value for inclusion in a change log line. */
private static <T> String fieldDescription(T value) {
    return value != null ? value.toString() : "[absent]";
}
/** Appends "name old -> new" to the builder when the extracted field differs between the two specs. */
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T oldValue = oldNode == null ? null : getter.apply(oldNode);
    T newValue = getter.apply(newNode);
    if (Objects.equals(oldValue, newValue)) return;

    if (builder.length() > 0) builder.append(", ");
    builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
}
/** Cancels the scheduled filebeat restarter task, if one is active. */
private void stopFilebeatSchedulerIfNeeded() {
    if (!currentFilebeatRestarter.isPresent()) return;

    currentFilebeatRestarter.get().cancel(true);
    currentFilebeatRestarter = Optional.empty();
}
/**
 * Collects CPU, memory, disk and network statistics for this node's container and pushes them
 * into the container's metrics receiver. No-op unless containerState is UNKNOWN
 * (the state set after a successful start) and container stats are available.
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    if (containerState != UNKNOWN) return;
    final NodeAgentContext context = contextSupplier.currentContext();
    final NodeSpec node = context.node();

    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;

    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();

    ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // The stats objects expose untyped maps; the casts below follow the docker stats layout
    // (cpu_stats.cpu_usage.*, memory_stats.*) — TODO confirm against the docker API version in use
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

    // Scale CPU usage to the share of the host this node is entitled to
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

    // "used" excludes the page cache; "total.used" includes it
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("mem_total.used", memoryTotalBytesUsage)
            .withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);

    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());

    // One metrics set per network interface, tagged with the interface name
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });

    pushMetricsToContainer(context, metrics);
}
/** Feeds the given metrics into the metrics receiver inside the container via vespa-rpc-invoke. */
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
    try {
        StringBuilder report = new StringBuilder();
        for (DimensionMetrics dimensionMetrics : metrics)
            report.append(dimensionMetrics.toSecretAgentReport());

        String wrappedMetrics = "s:" + report.toString();
        // Push metrics to the metrics proxy in each container
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
/** Looks up this node's container, caching a negative result in containerState to skip future lookups. */
private Optional<Container> getContainer(NodeAgentContext context) {
    if (containerState == ABSENT) return Optional.empty();

    Optional<Container> container = dockerOperations.getContainer(context);
    if (container.isPresent()) return container;

    containerState = ABSENT;
    return container;
}
/** Returns whether an image pull started by scheduleDownLoadIfNeeded() is still in flight. */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
/** Returns the number of unhandled exceptions seen since the previous call, then resets the counter. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
/**
 * Tracks cumulative CPU usage counters between ticks and exposes the per-interval
 * container/system usage ratios derived from their deltas.
 */
class CpuUsageReporter {
    // Last observed cumulative counters
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two updateCpuDeltas() calls
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // First sample: no previous system counter, so force the delta (and thus the ratios) to be undefined
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     * Returns NaN until two samples have been taken.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Same as getCpuUsageRatio(), but for CPU time the container spent in kernel mode only. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/** Asks the Orchestrator for permission to suspend this node; no-op unless the node is active. */
private void orchestratorSuspendNode(NodeAgentContext context) {
    if (context.node().getState() != Node.State.active) return;

    context.log(logger, "Ask Orchestrator for permission to suspend node");
    orchestrator.suspend(context.hostname().value());
}
/**
 * Hook for subclasses to provide a way to write files into the container;
 * the default implementation rejects all writes.
 */
protected ContainerData createContainerData(NodeAgentContext context) {
    return (pathInContainer, data) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} |
Much better! | private List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
List<Node> removed = new ArrayList<>();
if (node.type().isDockerHost()) {
getChildNodes(node.hostname()).stream()
.filter(child -> force || canRemove(child, true))
.forEach(removed::add);
}
if (force || canRemove(node, false)) removed.add(node);
db.removeNodes(removed);
return removed;
} catch (RuntimeException e) {
throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
}
} | if (node.type().isDockerHost()) { | private List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockUnallocated()) {
List<Node> removed = new ArrayList<>();
if (node.type().isDockerHost()) {
list().childrenOf(node).asList().stream()
.filter(child -> force || canRemove(child, true))
.forEach(removed::add);
}
if (force || canRemove(node, false)) removed.add(node);
db.removeNodes(removed);
return removed;
} catch (RuntimeException e) {
throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final DockerImage dockerImage;
private final OsVersions osVersions;
private final Flags flags;
/**
 * Creates a node repository from a zookeeper provider.
 * This will use the system time (UTC) to make time-sensitive decisions.
 */
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
    this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()), config.useCuratorClientCache());
}
/**
 * Creates a node repository from a zookeeper provider and a clock instance
 * which will be used for time-sensitive decisions.
 */
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
                      DockerImage dockerImage, boolean useCuratorClientCache) {
    this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
    this.zone = zone;
    this.clock = clock;
    this.flavors = flavors;
    this.nameResolver = nameResolver;
    this.dockerImage = dockerImage;
    this.osVersions = new OsVersions(this.db);
    this.flags = new Flags(this.db);

    // Rewrite all nodes in all states on startup so stored node data is upgraded to the current format
    for (Node.State state : Node.State.values())
        db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this repository */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this repository */
public DockerImage dockerImage() { return dockerImage; }
/** Returns the name resolver used to resolve hostnames and IP addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this repository */
public OsVersions osVersions() { return osVersions; }
/** Returns feature flags of this node repository */
public Flags flags() { return flags; }
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found.
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> getNode(String hostname, Node.State ... inState) {
    return db.getNode(hostname, inState);
}
/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 * @return a mutable copy of the list of matching nodes
 */
public List<Node> getNodes(Node.State ... inState) {
    return new ArrayList<>(db.getNodes(inState));
}
/**
 * Finds and returns the nodes of the given type in any of the given states.
 *
 * @param type the node type to return
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 * @return the list of matching nodes
 */
public List<Node> getNodes(NodeType type, Node.State ... inState) {
    return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
 * Finds and returns all nodes whose parent hostname equals the given hostname.
 *
 * @param hostname Parent hostname
 * @return List of child nodes
 */
public List<Node> getChildNodes(String hostname) {
    return db.getNodes().stream()
            .filter(node -> node.parentHostname().map(hostname::equals).orElse(false))
            .collect(Collectors.toList());
}
/** Returns the nodes of the given application, restricted to the given states (all states if none given) */
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
/** Returns all nodes in the inactive state */
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
/** Returns all nodes in the failed state */
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
 * Returns the ACL for the node (trusted nodes, networks and ports),
 * derived from the node's type, state and allocation.
 */
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
    Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
    Set<Integer> trustedPorts = new HashSet<>();

    // All nodes trust the other nodes of the same application
    node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
    // Port 22 (SSH) is trusted for all node types
    trustedPorts.add(22);

    switch (node.type()) {
        case tenant:
            // Tenant nodes trust config servers, their parent hosts and proxy nodes
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedNodes.addAll(candidates.parentsOf(trustedNodes).asList());
            trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
            if (node.state() == Node.State.ready) {
                // Ready tenant nodes additionally trust all other tenant nodes
                trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
            }
            break;
        case config:
            // Config servers trust all nodes, and open 4443
            trustedNodes.addAll(candidates.asList());
            trustedPorts.add(4443);
            break;
        case proxy:
            // Proxy nodes trust config servers and open the proxy ports
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedPorts.add(443);
            trustedPorts.add(4080);
            trustedPorts.add(4443);
            break;
        case controller:
            trustedPorts.add(4443);
            trustedPorts.add(443);
            break;
        default:
            throw new IllegalArgumentException(
                    String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
                            node.hostname(), node.type()));
    }

    return new NodeAcl(node, trustedNodes, Collections.emptySet(), trustedPorts);
}
/**
 * Creates a list of node ACLs which identify which nodes the given node should trust.
 *
 * @param node Node for which to generate ACLs
 * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
 * @return List of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = new NodeList(getNodes());
    if (!children) return Collections.singletonList(getNodeAcl(node, candidates));

    return candidates.childrenOf(node).asList().stream()
            .map(childNode -> getNodeAcl(childNode, candidates))
            .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
/** Returns the flavors known to this node repository */
public NodeFlavors getAvailableFlavors() {
    return flavors;
}
/**
 * Creates a new node object, without adding it to the node repo.
 * If no IP address is given, it will be resolved from the hostname via the name resolver.
 */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> ipAddressPool, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    if (ipAddresses.isEmpty()) {
        ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
    }

    return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), ipAddressPool, hostname, parentHostname, flavor, type);
}
/** Convenience overload of createNode without an IP address pool */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
/** Convenience overload of createNode without IP addresses; they will be resolved from the hostname */
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes) {
    // Validate the whole batch before taking the lock, so nothing is added on failure
    for (Node node : nodes) {
        if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
        }
        if (!node.allocation().isPresent()) {
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
        }
        Optional<Node> existing = getNode(node.hostname());
        if (existing.isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists (" +
                    existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                    node + ", " + node.history());
    }

    try (Mutex lock = lockUnallocated()) {
        return db.addNodesInState(nodes, Node.State.reserved);
    }
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
    // Reject the whole batch if any hostname is already known
    for (Node node : nodes) {
        if (getNode(node.hostname()).isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
    }
    try (Mutex lock = lockUnallocated()) {
        return db.addNodes(nodes);
    }
}
/**
 * Sets a list of nodes ready and returns the nodes in the ready state.
 * All nodes must be in the dirty state; retirement/deprovision flags are reset in the process.
 */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesWithResetFields = nodes.stream()
                .map(node -> {
                    if (node.state() != Node.State.dirty)
                        throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
                    // Clear operator flags so the node starts its new life with a clean slate
                    return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
                })
                .collect(Collectors.toList());

        return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}
/** Sets the node with the given hostname ready; a no-op if it is already ready. */
public Node setReady(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));

    return node.state() == Node.State.ready
            ? node
            : setReady(Collections.singletonList(node), agent, reason).get(0);
}
/** Moves the given nodes to the reserved state. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Moves the given nodes to the active state as part of the given transaction. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes = new ArrayList<>();
        for (Node node : nodes)
            removableNodes.add(node.with(node.allocation().get().removable()));
        write(removableNodes);
    }
}
/** Moves all reserved and active nodes of the given application to inactive, as part of the given transaction */
public void deactivate(ApplicationId application, NestedTransaction transaction) {
    try (Mutex lock = lock(application)) {
        db.writeTo(Node.State.inactive,
                db.getNodes(application, Node.State.reserved, Node.State.active),
                Agent.application, Optional.empty(), transaction
        );
    }
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state
 * which will hold if the transaction commits.
 * This method does <b>not</b> lock the node repository.
 */
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Moves the given nodes to the dirty state, locking per owning application as needed */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeListFilter.from(nodes), node -> setDirty(node, agent, reason));
}
/**
 * Set a node dirty, which is in the provisioned, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if the node has hardware failure
 */
public Node setDirty(Node node, Agent agent, String reason) {
    if (node.status().hardwareFailureDescription().isPresent())
        throw new IllegalArgumentException("Could not deallocate " + node.hostname() + ": It has a hardware failure");
    return db.writeTo(Node.State.dirty, node, agent, Optional.of(reason));
}
/**
 * Moves the node with the given hostname, and all its children if it is a Docker host,
 * to the dirty state. All affected nodes that are not already dirty must be in the
 * provisioned, failed or parked state.
 *
 * @return the nodes that were moved, in their new state
 * @throws IllegalArgumentException if the node is not found, or any affected node is in an illegal state
 */
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

    List<Node> nodesToDirty =
            (nodeToDirty.type().isDockerHost() ?
                    Stream.concat(getChildNodes(hostname).stream(), Stream.of(nodeToDirty)) :
                    Stream.of(nodeToDirty))
            .filter(node -> node.state() != Node.State.dirty)
            .collect(Collectors.toList());

    // Same set of legal source states as canRemove() allows for non-Docker nodes
    Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
            .filter(node -> ! legalStates.contains(node.state()))
            .map(Node::hostname)
            .collect(Collectors.toList());
    if (!hostnamesNotAllowedToDirty.isEmpty()) {
        throw new IllegalArgumentException("Could not deallocate " + hostname + ": " +
                String.join(", ", hostnamesNotAllowedToDirty) + " must be in either provisioned, failed or parked state");
    }

    return nodesToDirty.stream()
            .map(node -> setDirty(node, agent, reason))
            .collect(Collectors.toList());
}
/**
 * Fails this node and returns it in its new state. Does not affect children;
 * see failRecursively for that.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Parks this node and returns it in its new state. Does not affect children;
 * see parkRecursively for that.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.active, agent, Optional.of(reason));
}
/** Moves all children of the given hostname to the given state, then the node itself; returns all moved nodes. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    for (Node child : getChildNodes(hostname))
        moved.add(move(child, toState, agent, reason));
    moved.add(move(hostname, toState, agent, reason));
    return moved;
}
/** Looks up the node by hostname and moves it to the given state. */
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));

    return move(node, toState, agent, reason);
}
/**
 * Moves the node to the given state under the node's lock. Moving to active requires an
 * allocation and a free cluster slot (no other active node with the same cluster and index).
 */
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && ! node.allocation().isPresent())
        throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");

    try (Mutex lock = lock(node)) {
        if (toState == Node.State.active) {
            // Guard against two active nodes occupying the same cluster slot (cluster id + index)
            for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    throw new IllegalArgumentException("Could not move " + node + " to active:" +
                            "It has the same cluster and index as an existing node");
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        // Tenant docker containers are transient: delete instead of recycling to ready
        if (node.state() != Node.State.dirty) {
            throw new IllegalArgumentException(
                    "Cannot make " + hostname + " available for new allocation, must be in state dirty, but was in " + node.state());
        }
        return removeRecursively(node, true).get(0);
    }

    if (node.state() == Node.State.ready) return node;
    return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return List of all the nodes that have been removed
 */
public List<Node> removeRecursively(String hostname) {
    return removeRecursively(
            getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"')),
            false);
}
/**
 * Returns whether given node can be removed, throwing IllegalArgumentException otherwise. Removal is allowed if:
 *  Tenant node: node is unallocated
 *  Non-Docker-container node: iff in state provisioned|failed|parked
 *  Docker-container-node:
 *    If only removing the container node: node in state ready
 *    If also removing the parent node: child is in state provisioned|failed|parked|ready
 */
private boolean canRemove(Node node, boolean deletingAsChild) {
    if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
        throw new IllegalArgumentException("Node is currently allocated and cannot be removed: " +
                node.allocation().get());
    }

    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !deletingAsChild) {
        // A docker container removed on its own must be ready
        if (node.state() != Node.State.ready) {
            throw new IllegalArgumentException(
                    String.format("Docker container %s can only be removed when in ready state", node.hostname()));
        }
    } else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
        // A docker container removed together with its parent host may additionally be ready
        Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked,
                Node.State.ready);

        if (! legalStates.contains(node.state())) {
            throw new IllegalArgumentException(String.format("Child node %s can only be removed from following states: %s",
                    node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
        }
    } else {
        Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);

        if (! legalStates.contains(node.state())) {
            throw new IllegalArgumentException(String.format("Node %s can only be removed from following states: %s",
                    node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
        }
    }

    return true;
}
/**
 * Increases the restart generation of the active nodes matching the filter.
 * Returns the nodes in their new state.
 */
public List<Node> restart(NodeFilter filter) {
    // Only active nodes are considered since only allocated nodes carry a restart generation
    return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 * Returns the nodes in their new state.
 */
public List<Node> reboot(NodeFilter filter) {
    return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final DockerImage dockerImage;
private final OsVersions osVersions;
private final Flags flags;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()), config.useCuratorClientCache());
}
/**
* Creates a node repository from a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage, boolean useCuratorClientCache) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.dockerImage = dockerImage;
this.osVersions = new OsVersions(this.db);
this.flags = new Flags(this.db);
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage() { return dockerImage; }
/** @return The name resolver used to resolve hostname and ip addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns feature flags of this node repository */
public Flags flags() {
return flags;
}
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
return new NodeList(getNodes());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
* Returns the ACL for the node (trusted nodes, networks and ports)
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<Integer> trustedPorts = new HashSet<>();
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedPorts.add(22);
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedNodes.addAll(candidates.parentsOf(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
trustedPorts.add(4443);
break;
case proxy:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedPorts.add(443);
trustedPorts.add(4080);
trustedPorts.add(4443);
break;
case controller:
trustedPorts.add(4443);
trustedPorts.add(443);
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, Collections.emptySet(), trustedPorts);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = list();
if (children) {
return candidates.childrenOf(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
} else {
return Collections.singletonList(getNodeAcl(node, candidates));
}
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> ipAddressPool, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), ipAddressPool, hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes) {
for (Node node : nodes) {
if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
}
if (!node.allocation().isPresent()) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
try (Mutex lock = lockUnallocated()) {
return db.addNodesInState(nodes, Node.State.reserved);
}
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockUnallocated()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
db.writeTo(Node.State.inactive,
db.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), node -> setDirty(node, agent, reason));
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(Node node, Agent agent, String reason) {
if (node.status().hardwareFailureDescription().isPresent())
throw new IllegalArgumentException("Could not deallocate " + node.hostname() + ": It has a hardware failure");
return db.writeTo(Node.State.dirty, node, agent, Optional.of(reason));
}
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = getNode(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isDockerHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.map(Node::hostname)
.collect(Collectors.toList());
if (!hostnamesNotAllowedToDirty.isEmpty()) {
throw new IllegalArgumentException("Could not deallocate " + hostname + ": " +
String.join(", ", hostnamesNotAllowedToDirty) + " must be in either provisioned, failed or parked state");
}
return nodesToDirty.stream()
.map(node -> setDirty(node, agent, reason))
.collect(Collectors.toList());
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.active, agent, Optional.of(reason));
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, reason));
return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active:" +
"It has the same cluster and index as an existing node");
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty) {
throw new IllegalArgumentException(
"Cannot make " + hostname + " available for new allocation, must be in state dirty, but was in " + node.state());
}
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return List of all the nodes that have been removed
*/
public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
return removeRecursively(node, false);
}
/**
* Returns whether given node can be removed. Removal is allowed if:
* Tenant node: node is unallocated
* Non-Docker-container node: iff in state provisioned|failed|parked
* Docker-container-node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|ready
*/
private boolean canRemove(Node node, boolean deletingAsChild) {
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
throw new IllegalArgumentException("Node is currently allocated and cannot be removed: " +
node.allocation().get());
}
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !deletingAsChild) {
if (node.state() != Node.State.ready) {
throw new IllegalArgumentException(
String.format("Docker container %s can only be removed when in ready state", node.hostname()));
}
} else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked,
Node.State.ready);
if (! legalStates.contains(node.state())) {
throw new IllegalArgumentException(String.format("Child node %s can only be removed from following states: %s",
node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
}
} else {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state())) {
throw new IllegalArgumentException(String.format("Node %s can only be removed from following states: %s",
node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
}
}
return true;
}
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
} |
this log is to show reqeust url to help debug. | public void handleRequest(BaseRequest request) throws Exception {
BaseResponse response = new BaseResponse();
LOG.debug("receive http request. url=", request.getRequest().uri());
execute(request, response);
} | LOG.debug("receive http request. url=", request.getRequest().uri()); | public void handleRequest(BaseRequest request) throws Exception {
BaseResponse response = new BaseResponse();
LOG.debug("receive http request. url={}", request.getRequest().uri());
execute(request, response);
} | class BaseAction implements IAction {
private static final Logger LOG = LogManager.getLogger(BaseAction.class);
protected QeService qeService = null;
protected ActionController controller;
protected Catalog catalog;
public BaseAction(ActionController controller) {
this.controller = controller;
this.catalog = Catalog.getInstance();
}
public QeService getQeService() {
return qeService;
}
public void setQeService(QeService qeService) {
this.qeService = qeService;
}
@Override
public abstract void execute(BaseRequest request, BaseResponse response) throws DdlException;
protected void writeResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status) {
FullHttpResponse responseObj = null;
try {
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes("UTF-8")));
} catch (UnsupportedEncodingException e) {
LOG.warn("get exception.", e);
responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
Unpooled.wrappedBuffer(response.getContent().toString().getBytes()));
}
Preconditions.checkNotNull(responseObj);
HttpMethod method = request.getRequest().method();
checkDefaultContentTypeHeader(response, responseObj);
if (!method.equals(HttpMethod.HEAD)) {
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH,
String.valueOf(responseObj.content().readableBytes()));
}
writeCustomHeaders(response, responseObj);
writeCookies(response, responseObj);
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
request.getContext().write(responseObj).addListener(ChannelFutureListener.CLOSE);
} else {
responseObj.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
request.getContext().write(responseObj);
}
}
protected void writeFileResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status,
File resFile) {
HttpResponse responseObj = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
if (HttpHeaders.isKeepAlive(request.getRequest())) {
response.updateHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
}
ChannelFuture sendFileFuture;
ChannelFuture lastContentFuture;
RandomAccessFile rafFile;
try {
rafFile = new RandomAccessFile(resFile, "r");
long fileLength = 0;
fileLength = rafFile.length();
response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(fileLength));
writeCookies(response, responseObj);
writeCustomHeaders(response, responseObj);
request.getContext().write(responseObj);
if (request.getContext().pipeline().get(SslHandler.class) == null) {
sendFileFuture = request.getContext().write(new DefaultFileRegion(rafFile.getChannel(), 0, fileLength),
request.getContext().newProgressivePromise());
lastContentFuture = request.getContext().writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
} else {
sendFileFuture = request.getContext().writeAndFlush(
new HttpChunkedInput(new ChunkedFile(rafFile, 0, fileLength, 8192)),
request.getContext().newProgressivePromise());
lastContentFuture = sendFileFuture;
}
} catch (FileNotFoundException ignore) {
writeResponse(request, response, HttpResponseStatus.NOT_FOUND);
return;
} catch (IOException e1) {
writeResponse(request, response, HttpResponseStatus.INTERNAL_SERVER_ERROR);
return;
}
sendFileFuture.addListener(new ChannelProgressiveFutureListener() {
@Override
public void operationProgressed(ChannelProgressiveFuture future, long progress, long total) {
if (total < 0) {
LOG.debug("{} Transfer progress: {}", future.channel(), progress);
} else {
LOG.debug("{} Transfer progress: {} / {}", future.channel(), progress, total);
}
}
@Override
public void operationComplete(ChannelProgressiveFuture future) {
LOG.debug("{} Transfer complete.", future.channel());
if (!future.isSuccess()) {
Throwable cause = future.cause();
LOG.error("something wrong. ", cause);
}
}
});
boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
if (!keepAlive) {
lastContentFuture.addListener(ChannelFutureListener.CLOSE);
}
}
protected void checkDefaultContentTypeHeader(BaseResponse response, Object responseOj) {
List<String> header = response.getCustomHeaders().get(HttpHeaders.Names.CONTENT_TYPE);
if (header == null) {
response.updateHeader(HttpHeaders.Names.CONTENT_TYPE, "text/html");
}
}
protected void writeCustomHeaders(BaseResponse response, HttpResponse responseObj) {
for (Map.Entry<String, List<String>> entry : response.getHeaders().entrySet()) {
responseObj.headers().add(entry.getKey(), entry.getValue());
}
}
protected void writeCookies(BaseResponse response, HttpResponse responseObj) {
for (Cookie cookie : response.getCookies()) {
responseObj.headers().add(HttpHeaders.Names.SET_COOKIE, ServerCookieEncoder.encode(cookie));
}
}
public static class AuthorizationInfo {
public String fullUserName;
public String password;
public String cluster;
}
public boolean parseAuth(BaseRequest request, AuthorizationInfo authInfo) {
String encodedAuthString = request.getAuthorizationHeader();
if (Strings.isNullOrEmpty(encodedAuthString)) {
return false;
}
String[] parts = encodedAuthString.split(" ");
if (parts.length != 2) {
return false;
}
encodedAuthString = parts[1];
ByteBuf buf = null;
try {
buf = Unpooled.copiedBuffer(ByteBuffer.wrap(encodedAuthString.getBytes()));
String authString = Base64.decode(buf).toString(CharsetUtil.UTF_8);
int index = authString.indexOf(":");
authInfo.fullUserName = authString.substring(0, index);
final String[] elements = authInfo.fullUserName.split("@");
if (elements != null && elements.length < 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER,
authInfo.fullUserName);
authInfo.cluster = SystemInfoService.DEFAULT_CLUSTER;
} else if (elements != null && elements.length == 2) {
authInfo.fullUserName = ClusterNamespace.getFullName(elements[1], elements[0]);
authInfo.cluster = elements[1];
}
authInfo.password = authString.substring(index + 1);
} finally {
if (buf != null) {
buf.release();
}
}
return true;
}
private AuthorizationInfo checkAndGetUser(BaseRequest request)
throws UnauthorizedException {
AuthorizationInfo authInfo = new AuthorizationInfo();
if (!parseAuth(request, authInfo)) {
throw new UnauthorizedException("Need auth information.");
}
byte[] hashedPasswd = catalog.getUserMgr().getPassword(authInfo.fullUserName);
if (hashedPasswd == null) {
throw new UnauthorizedException("No such user(" + authInfo.fullUserName + ")");
}
if (!MysqlPassword.checkPlainPass(hashedPasswd, authInfo.password)) {
throw new UnauthorizedException("Password error");
}
return authInfo;
}
protected void checkAdmin(BaseRequest request) throws UnauthorizedException {
final AuthorizationInfo authInfo = checkAndGetUser(request);
if (!catalog.getUserMgr().isAdmin(authInfo.fullUserName)) {
throw new UnauthorizedException("Administrator needed");
}
}
protected void checkReadPriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_ONLY)) {
throw new UnauthorizedException("Read Privilege needed");
}
}
// Requires READ_WRITE access for the (already authenticated) user on the given database.
protected void checkWritePriv(String fullUserName, String fullDbName)
throws UnauthorizedException {
if (!catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_WRITE)) {
throw new UnauthorizedException("Write Privilege needed");
}
}
// Public wrapper around checkAndGetUser: authenticates the request and returns
// the parsed credentials, throwing UnauthorizedException on failure.
public AuthorizationInfo getAuthorizationInfo(BaseRequest request)
throws UnauthorizedException {
return checkAndGetUser(request);
}
// Sends a 401 with a WWW-Authenticate challenge so the client prompts for Basic auth.
protected void writeAuthResponse(BaseRequest request, BaseResponse response) {
response.addHeader(HttpHeaders.Names.WWW_AUTHENTICATE, "Basic realm=\"\"");
writeResponse(request, response, HttpResponseStatus.UNAUTHORIZED);
}
} | class BaseAction implements IAction {
private static final Logger LOG = LogManager.getLogger(BaseAction.class);
// Query-execution service; injected via setQeService() after construction, may be null.
protected QeService qeService = null;
// Controller this action is registered with.
protected ActionController controller;
// Global catalog singleton, cached at construction time.
protected Catalog catalog;
public BaseAction(ActionController controller) {
this.controller = controller;
this.catalog = Catalog.getInstance();
}
// Returns the query-execution service (null until injected).
public QeService getQeService() {
return qeService;
}
// Injects the query-execution service used by query-related actions.
public void setQeService(QeService qeService) {
this.qeService = qeService;
}
// Entry point each concrete action implements to handle a single HTTP request.
// Implementations write their result/response via the BaseResponse argument.
@Override
public abstract void execute(BaseRequest request, BaseResponse response) throws DdlException;
/**
 * Serializes {@code response} and writes it on the channel with the given HTTP status.
 * Honors keep-alive: the connection is closed after the write unless the client asked
 * to keep it open. HEAD responses are sent without a Content-Length header, matching
 * the original behavior.
 */
protected void writeResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status) {
    // UTF-8 is guaranteed to be available, so use the Charset overload of getBytes()
    // instead of getBytes("UTF-8"), which forces an unreachable
    // UnsupportedEncodingException catch block (CharsetUtil is already used elsewhere
    // in this file).
    FullHttpResponse responseObj = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status,
            Unpooled.wrappedBuffer(response.getContent().toString().getBytes(CharsetUtil.UTF_8)));
    HttpMethod method = request.getRequest().method();
    checkDefaultContentTypeHeader(response, responseObj);
    if (!method.equals(HttpMethod.HEAD)) {
        response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH,
                String.valueOf(responseObj.content().readableBytes()));
    }
    writeCustomHeaders(response, responseObj);
    writeCookies(response, responseObj);
    boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
    if (!keepAlive) {
        // Close the connection once the response has been flushed.
        request.getContext().write(responseObj).addListener(ChannelFutureListener.CLOSE);
    } else {
        responseObj.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
        request.getContext().write(responseObj);
    }
}
/**
 * Streams {@code resFile} back to the client with the given status.
 * Plaintext connections use zero-copy {@link DefaultFileRegion}; when the pipeline has
 * an {@link SslHandler}, a FileRegion cannot be encrypted, so the file is sent with
 * chunked writes instead. Responds 404 if the file does not exist and 500 on read
 * errors.
 */
protected void writeFileResponse(BaseRequest request, BaseResponse response, HttpResponseStatus status,
        File resFile) {
    HttpResponse responseObj = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
    if (HttpHeaders.isKeepAlive(request.getRequest())) {
        response.updateHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
    }
    ChannelFuture sendFileFuture;
    ChannelFuture lastContentFuture;
    RandomAccessFile rafFile;
    try {
        rafFile = new RandomAccessFile(resFile, "r");
    } catch (FileNotFoundException ignore) {
        // Nothing was opened, so there is nothing to clean up.
        writeResponse(request, response, HttpResponseStatus.NOT_FOUND);
        return;
    }
    try {
        long fileLength = rafFile.length();
        response.updateHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(fileLength));
        writeCookies(response, responseObj);
        writeCustomHeaders(response, responseObj);
        request.getContext().write(responseObj);
        if (request.getContext().pipeline().get(SslHandler.class) == null) {
            // Zero-copy: netty releases the DefaultFileRegion (closing the underlying
            // channel) once the write completes.
            sendFileFuture = request.getContext().write(new DefaultFileRegion(rafFile.getChannel(), 0, fileLength),
                    request.getContext().newProgressivePromise());
            lastContentFuture = request.getContext().writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
        } else {
            // ChunkedFile closes the file when the transfer finishes.
            sendFileFuture = request.getContext().writeAndFlush(
                    new HttpChunkedInput(new ChunkedFile(rafFile, 0, fileLength, 8192)),
                    request.getContext().newProgressivePromise());
            lastContentFuture = sendFileFuture;
        }
    } catch (IOException e1) {
        // BUG FIX: the file was opened but never handed to netty, so no FileRegion or
        // ChunkedFile will ever close it — close it here to avoid leaking a file
        // descriptor (the original code leaked it on this path).
        try {
            rafFile.close();
        } catch (IOException ignored) {
            // best effort; nothing more can be done
        }
        writeResponse(request, response, HttpResponseStatus.INTERNAL_SERVER_ERROR);
        return;
    }
    sendFileFuture.addListener(new ChannelProgressiveFutureListener() {
        @Override
        public void operationProgressed(ChannelProgressiveFuture future, long progress, long total) {
            if (total < 0) { // total size unknown
                LOG.debug("{} Transfer progress: {}", future.channel(), progress);
            } else {
                LOG.debug("{} Transfer progress: {} / {}", future.channel(), progress, total);
            }
        }
        @Override
        public void operationComplete(ChannelProgressiveFuture future) {
            LOG.debug("{} Transfer complete.", future.channel());
            if (!future.isSuccess()) {
                Throwable cause = future.cause();
                LOG.error("something wrong. ", cause);
            }
        }
    });
    boolean keepAlive = HttpHeaders.isKeepAlive(request.getRequest());
    if (!keepAlive) {
        lastContentFuture.addListener(ChannelFutureListener.CLOSE);
    }
}
/** Defaults the Content-Type header to "text/html" when the response has not set one. */
protected void checkDefaultContentTypeHeader(BaseResponse response, Object responseObj) {
    if (response.getCustomHeaders().get(HttpHeaders.Names.CONTENT_TYPE) == null) {
        response.updateHeader(HttpHeaders.Names.CONTENT_TYPE, "text/html");
    }
}
/** Copies every header from {@code response} onto the outgoing netty response object. */
protected void writeCustomHeaders(BaseResponse response, HttpResponse responseObj) {
    response.getHeaders().forEach((name, values) -> responseObj.headers().add(name, values));
}
/** Encodes each cookie of {@code response} into a Set-Cookie header on the netty response. */
protected void writeCookies(BaseResponse response, HttpResponse responseObj) {
    response.getCookies().forEach(cookie ->
            responseObj.headers().add(HttpHeaders.Names.SET_COOKIE, ServerCookieEncoder.encode(cookie)));
}
// Simple holder for the credentials parsed out of an HTTP Basic auth header.
public static class AuthorizationInfo {
public String fullUserName; // cluster-qualified user name
public String password; // plain-text password from the auth header
public String cluster; // cluster part of "user@cluster", or the default cluster
}
/**
 * Parses the HTTP Basic Authorization header of {@code request} into {@code authInfo}.
 * Expects "Basic base64(user:password)"; a user of the form "user@cluster" is
 * qualified with that cluster, otherwise the default cluster is used.
 *
 * @return true when the header was present and well-formed, false otherwise
 */
public boolean parseAuth(BaseRequest request, AuthorizationInfo authInfo) {
    String encodedAuthString = request.getAuthorizationHeader();
    if (Strings.isNullOrEmpty(encodedAuthString)) {
        return false;
    }
    // Header format is "<scheme> <base64-credentials>".
    String[] parts = encodedAuthString.split(" ");
    if (parts.length != 2) {
        return false;
    }
    encodedAuthString = parts[1];
    ByteBuf buf = null;
    try {
        // Base64 text is ASCII; decode with an explicit charset instead of the
        // platform default.
        buf = Unpooled.copiedBuffer(ByteBuffer.wrap(encodedAuthString.getBytes(CharsetUtil.UTF_8)));
        String authString = Base64.decode(buf).toString(CharsetUtil.UTF_8);
        // BUG FIX: credentials must be "user:password". The original code did not
        // check for a missing ':' and substring(0, -1) would throw
        // StringIndexOutOfBoundsException on malformed input; reject it instead.
        int index = authString.indexOf(":");
        if (index < 0) {
            return false;
        }
        authInfo.fullUserName = authString.substring(0, index);
        // String.split never returns null, so no null check is needed here.
        final String[] elements = authInfo.fullUserName.split("@");
        if (elements.length < 2) {
            // No explicit cluster: qualify the user with the default cluster.
            authInfo.fullUserName = ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER,
                    authInfo.fullUserName);
            authInfo.cluster = SystemInfoService.DEFAULT_CLUSTER;
        } else if (elements.length == 2) {
            // "user@cluster" form.
            authInfo.fullUserName = ClusterNamespace.getFullName(elements[1], elements[0]);
            authInfo.cluster = elements[1];
        }
        authInfo.password = authString.substring(index + 1);
    } finally {
        if (buf != null) {
            // copiedBuffer allocates a refcounted buffer; always release it.
            buf.release();
        }
    }
    return true;
}
/**
 * Authenticates the request: parses Basic auth, resolves the user in the catalog and
 * verifies the password against the stored hash.
 *
 * @throws UnauthorizedException when the header is missing/malformed, the user is
 *         unknown, or the password does not match
 */
private AuthorizationInfo checkAndGetUser(BaseRequest request)
        throws UnauthorizedException {
    final AuthorizationInfo authInfo = new AuthorizationInfo();
    if (!parseAuth(request, authInfo)) {
        throw new UnauthorizedException("Need auth information.");
    }
    final byte[] storedPassword = catalog.getUserMgr().getPassword(authInfo.fullUserName);
    if (storedPassword == null) {
        // Unknown user: the user manager has no password entry for this name.
        throw new UnauthorizedException("No such user(" + authInfo.fullUserName + ")");
    }
    if (!MysqlPassword.checkPlainPass(storedPassword, authInfo.password)) {
        throw new UnauthorizedException("Password error");
    }
    return authInfo;
}
/** Authenticates the request and requires the resolved user to be an administrator. */
protected void checkAdmin(BaseRequest request) throws UnauthorizedException {
    AuthorizationInfo info = checkAndGetUser(request);
    boolean isAdmin = catalog.getUserMgr().isAdmin(info.fullUserName);
    if (!isAdmin) {
        throw new UnauthorizedException("Administrator needed");
    }
}
/** Requires READ_ONLY access for {@code fullUserName} on {@code fullDbName}. */
protected void checkReadPriv(String fullUserName, String fullDbName)
        throws UnauthorizedException {
    boolean allowed = catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_ONLY);
    if (!allowed) {
        throw new UnauthorizedException("Read Privilege needed");
    }
}
/** Requires READ_WRITE access for {@code fullUserName} on {@code fullDbName}. */
protected void checkWritePriv(String fullUserName, String fullDbName)
        throws UnauthorizedException {
    boolean allowed = catalog.getUserMgr().checkAccess(fullUserName, fullDbName, AccessPrivilege.READ_WRITE);
    if (!allowed) {
        throw new UnauthorizedException("Write Privilege needed");
    }
}
/**
 * Public entry for authenticating a request; delegates to {@link #checkAndGetUser}
 * and returns the parsed credentials.
 */
public AuthorizationInfo getAuthorizationInfo(BaseRequest request)
        throws UnauthorizedException {
    final AuthorizationInfo info = checkAndGetUser(request);
    return info;
}
/** Replies 401 with a WWW-Authenticate challenge so clients prompt for Basic credentials. */
protected void writeAuthResponse(BaseRequest request, BaseResponse response) {
    final String challenge = "Basic realm=\"\"";
    response.addHeader(HttpHeaders.Names.WWW_AUTHENTICATE, challenge);
    writeResponse(request, response, HttpResponseStatus.UNAUTHORIZED);
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.