comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
We should log the files being uploaded at Level.INFO. And it would be nice if the message would contain the actual list of files being uploaded, as sync() may choose to skip some. | public void processServiceDumpRequest(NodeAgentContext context) {
Instant startedAt = Instant.now();
NodeSpec nodeSpec = context.node();
ServiceDumpReport request = nodeSpec.reports().getReport(ServiceDumpReport.REPORT_ID, ServiceDumpReport.class)
.orElse(null);
if (request == null || request.isCompletedOrFailed()) {
context.log(log, Level.FINE, "No service dump requested or dump already completed/failed");
return;
}
if (isNullTimestamp(request.getCreatedMillisOrNull())) {
handleFailure(context, request, startedAt, null, "'createdMillis' is missing or null");
return;
}
String configId = request.configId();
if (configId == null) {
handleFailure(context, request, startedAt, null, "Service config id is missing from request");
return;
}
Instant expiry = expireAt(startedAt, request);
if (expiry.isBefore(startedAt)) {
handleFailure(context, request, startedAt, null, "Request already expired");
return;
}
try {
context.log(log, Level.FINE,
"Creating dump for " + configId + " requested at " + Instant.ofEpochMilli(request.getCreatedMillisOrNull()));
storeReport(context, createStartedReport(request, startedAt));
Path directoryInNode = context.pathInNodeUnderVespaHome("tmp/vespa-service-dump");
Path directoryOnHost = context.pathOnHostFromPathInNode(directoryInNode);
Files.deleteIfExists(directoryOnHost);
Files.createDirectory(directoryOnHost);
Path vespaJvmDumper = context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper");
CommandResult result = container.executeCommandInContainerAsRoot(
context, vespaJvmDumper.toString(), configId, directoryInNode.toString());
context.log(log, Level.FINE, "vespa-jvm-dumper exit code: " + result.getExitCode());
context.log(log, Level.FINE, "vespa-jvm-dumper output: " + result.getOutput());
if (result.getExitCode() > 0) {
handleFailure(context, request, startedAt, null, "Failed to create dump: " + result.getOutput());
return;
}
URI destination = serviceDumpDestination(nodeSpec, createDumpId(request));
context.log(log, Level.FINE, "Uploading files with destination " + destination + " and expiry " + expiry);
List<SyncFileInfo> files = dumpFiles(directoryOnHost, destination, expiry);
logFilesToUpload(context, files);
if (!syncClient.sync(context, files, Integer.MAX_VALUE)) {
handleFailure(context, request, startedAt, null, "Unable to upload all files");
return;
}
context.log(log, Level.FINE, "Upload complete");
storeReport(context, createSuccessReport(request, startedAt, destination));
} catch (Exception e) {
handleFailure(context, request, startedAt, e, e.getMessage());
}
} | logFilesToUpload(context, files); | public void processServiceDumpRequest(NodeAgentContext context) {
Instant startedAt = clock.instant();
NodeSpec nodeSpec = context.node();
ServiceDumpReport request = nodeSpec.reports().getReport(ServiceDumpReport.REPORT_ID, ServiceDumpReport.class)
.orElse(null);
if (request == null || request.isCompletedOrFailed()) {
context.log(log, Level.FINE, "No service dump requested or dump already completed/failed");
return;
}
if (isNullTimestamp(request.getCreatedMillisOrNull())) {
handleFailure(context, request, startedAt, "'createdMillis' is missing or null");
return;
}
String configId = request.configId();
if (configId == null) {
handleFailure(context, request, startedAt, "Service config id is missing from request");
return;
}
Instant expiry = expireAt(startedAt, request);
if (expiry.isBefore(startedAt)) {
handleFailure(context, request, startedAt, "Request already expired");
return;
}
UnixPath directoryInNode = new UnixPath(context.pathInNodeUnderVespaHome("tmp/vespa-service-dump"));
UnixPath directoryOnHost = new UnixPath(context.pathOnHostFromPathInNode(directoryInNode.toPath()));
try {
context.log(log, Level.INFO,
"Creating dump for " + configId + " requested at " + Instant.ofEpochMilli(request.getCreatedMillisOrNull()));
storeReport(context, createStartedReport(request, startedAt));
if (directoryOnHost.exists()) {
context.log(log, Level.INFO, "Removing existing directory '" + directoryOnHost +"'.");
directoryOnHost.deleteRecursively();
}
context.log(log, Level.INFO, "Creating '" + directoryOnHost +"'.");
directoryOnHost.createDirectory();
directoryOnHost.setPermissions("rwxrwxrwx");
UnixPath vespaJvmDumper = new UnixPath(context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper"));
context.log(log, Level.INFO, "Executing '" + vespaJvmDumper + "' with arguments '" + configId + "' and '" + directoryInNode + "'");
CommandResult result = container.executeCommandInContainerAsRoot(
context, vespaJvmDumper.toString(), configId, directoryInNode.toString());
context.log(log, Level.INFO, "vespa-jvm-dumper exited with code '" + result.getExitCode() + "' and output:\n" + result.getOutput());
if (result.getExitCode() > 0) {
handleFailure(context, request, startedAt, "Failed to create dump: " + result.getOutput());
return;
}
URI destination = serviceDumpDestination(nodeSpec, createDumpId(request));
context.log(log, Level.INFO, "Uploading files with destination " + destination + " and expiry " + expiry);
List<SyncFileInfo> files = dumpFiles(directoryOnHost.toPath(), destination, expiry);
if (!syncClient.sync(context, files, Integer.MAX_VALUE)) {
handleFailure(context, request, startedAt, "Unable to upload all files");
return;
}
context.log(log, Level.INFO, "Upload complete");
storeReport(context, createSuccessReport(clock, request, startedAt, destination));
} catch (Exception e) {
handleFailure(context, request, startedAt, e);
} finally {
if (directoryOnHost.exists()) {
context.log(log, Level.INFO, "Deleting directory '" + directoryOnHost +"'.");
directoryOnHost.deleteRecursively();
}
}
} | class VespaServiceDumperImpl implements VespaServiceDumper {
private static final Logger log = Logger.getLogger(VespaServiceDumperImpl.class.getName());
private final ContainerOperations container;
private final SyncClient syncClient;
private final NodeRepository nodeRepository;
public VespaServiceDumperImpl(ContainerOperations container, SyncClient syncClient, NodeRepository nodeRepository) {
this.container = container;
this.syncClient = syncClient;
this.nodeRepository = nodeRepository;
}
@Override
private List<SyncFileInfo> dumpFiles(Path directoryOnHost, URI destination, Instant expiry) {
return FileFinder.files(directoryOnHost).stream()
.flatMap(file -> SyncFileInfo.forServiceDump(destination, file.path(), expiry).stream())
.collect(Collectors.toList());
}
private void logFilesToUpload(NodeAgentContext context, List<SyncFileInfo> files) {
if (log.isLoggable(Level.FINE)) {
String message = files.stream()
.map(file -> file.source().toString())
.collect(Collectors.joining());
context.log(log, Level.FINE, message);
}
}
private static Instant expireAt(Instant startedAt, ServiceDumpReport request) {
return isNullTimestamp(request.expireAt())
? startedAt.plus(7, ChronoUnit.DAYS)
: Instant.ofEpochMilli(request.expireAt());
}
private void handleFailure(NodeAgentContext context, ServiceDumpReport request, Instant startedAt,
Exception failure, String message) {
if (failure != null) {
context.log(log, Level.WARNING, message, failure);
} else {
context.log(log, Level.WARNING, message);
}
ServiceDumpReport report = createErrorReport(request, startedAt, message);
storeReport(context, report);
}
private void storeReport(NodeAgentContext context, ServiceDumpReport report) {
NodeAttributes nodeAttributes = new NodeAttributes();
nodeAttributes.withReport(ServiceDumpReport.REPORT_ID, report.toJsonNode());
nodeRepository.updateNodeAttributes(context.hostname().value(), nodeAttributes);
}
private static ServiceDumpReport createStartedReport(ServiceDumpReport request, Instant startedAt) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, null, null, request.configId(),
request.expireAt(), null);
}
private static ServiceDumpReport createSuccessReport(ServiceDumpReport request, Instant startedAt, URI location) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), Instant.now().toEpochMilli(), null,
location.toString(), request.configId(), request.expireAt(), null);
}
private static ServiceDumpReport createErrorReport(ServiceDumpReport request, Instant startedAt, String message) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, Instant.now().toEpochMilli(), null,
request.configId(), request.expireAt(), message);
}
static String createDumpId(ServiceDumpReport request) {
String sanitizedConfigId = Lowercase.toLowerCase(request.configId()).replaceAll("[^a-z_0-9]", "-");
return sanitizedConfigId + "-" + request.getCreatedMillisOrNull().toString();
}
private static URI serviceDumpDestination(NodeSpec spec, String dumpId) {
URI archiveUri = spec.archiveUri().get();
String targetDirectory = "service-dump/" + dumpId;
return archiveUri.resolve(targetDirectory);
}
} | class VespaServiceDumperImpl implements VespaServiceDumper {
private static final Logger log = Logger.getLogger(VespaServiceDumperImpl.class.getName());
private final ContainerOperations container;
private final SyncClient syncClient;
private final NodeRepository nodeRepository;
private final Clock clock;
public VespaServiceDumperImpl(ContainerOperations container, SyncClient syncClient, NodeRepository nodeRepository) {
this(container, syncClient, nodeRepository, Clock.systemUTC());
}
VespaServiceDumperImpl(ContainerOperations container, SyncClient syncClient, NodeRepository nodeRepository,
Clock clock) {
this.container = container;
this.syncClient = syncClient;
this.nodeRepository = nodeRepository;
this.clock = clock;
}
@Override
private List<SyncFileInfo> dumpFiles(Path directoryOnHost, URI destination, Instant expiry) {
return FileFinder.files(directoryOnHost).stream()
.flatMap(file -> SyncFileInfo.forServiceDump(destination, file.path(), expiry).stream())
.collect(Collectors.toList());
}
private static Instant expireAt(Instant startedAt, ServiceDumpReport request) {
return isNullTimestamp(request.expireAt())
? startedAt.plus(7, ChronoUnit.DAYS)
: Instant.ofEpochMilli(request.expireAt());
}
private void handleFailure(NodeAgentContext context, ServiceDumpReport request, Instant startedAt, Exception failure) {
context.log(log, Level.WARNING, failure.toString(), failure);
ServiceDumpReport report = createErrorReport(clock, request, startedAt, failure.toString());
storeReport(context, report);
}
private void handleFailure(NodeAgentContext context, ServiceDumpReport request, Instant startedAt, String message) {
context.log(log, Level.WARNING, message);
ServiceDumpReport report = createErrorReport(clock, request, startedAt, message);
storeReport(context, report);
}
private void storeReport(NodeAgentContext context, ServiceDumpReport report) {
NodeAttributes nodeAttributes = new NodeAttributes();
nodeAttributes.withReport(ServiceDumpReport.REPORT_ID, report.toJsonNode());
nodeRepository.updateNodeAttributes(context.hostname().value(), nodeAttributes);
}
private static ServiceDumpReport createStartedReport(ServiceDumpReport request, Instant startedAt) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, null, null, request.configId(),
request.expireAt(), null);
}
private static ServiceDumpReport createSuccessReport(
Clock clock, ServiceDumpReport request, Instant startedAt, URI location) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), clock.instant().toEpochMilli(), null,
location.toString(), request.configId(), request.expireAt(), null);
}
private static ServiceDumpReport createErrorReport(
Clock clock, ServiceDumpReport request, Instant startedAt, String message) {
return new ServiceDumpReport(
request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, clock.instant().toEpochMilli(), null,
request.configId(), request.expireAt(), message);
}
static String createDumpId(ServiceDumpReport request) {
String sanitizedConfigId = Lowercase.toLowerCase(request.configId()).replaceAll("[^a-z_0-9]", "-");
return sanitizedConfigId + "-" + request.getCreatedMillisOrNull().toString();
}
private static URI serviceDumpDestination(NodeSpec spec, String dumpId) {
URI archiveUri = spec.archiveUri().get();
String targetDirectory = "service-dump/" + dumpId + "/";
return archiveUri.resolve(targetDirectory);
}
} |
```suggestion return c == '-' || isValidFirst(c); ``` | private static boolean isValidAny(char c) {
return (c == '_') || (c == '-') || ((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z')) || ((c >= '0') && (c <= '9'));
} | return (c == '_') || (c == '-') || ((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z')) || ((c >= '0') && (c <= '9')); | private static boolean isValidAny(char c) {
return c == '-' || isValidFirst(c);
} | class FeatureNames {
public static Reference asConstantFeature(String constantName) {
return Reference.simple("constant", quoteIfNecessary(constantName));
}
public static Reference asAttributeFeature(String attributeName) {
return Reference.simple("attribute", attributeName);
}
public static Reference asQueryFeature(String propertyName) {
return Reference.simple("query", quoteIfNecessary(propertyName));
}
/** Returns true if the given reference is an attribute, constant or query feature */
public static boolean isSimpleFeature(Reference reference) {
if ( ! reference.isSimple()) return false;
String name = reference.name();
return name.equals("attribute") || name.equals("constant") || name.equals("query");
}
/** Returns true if this is a constant */
public static boolean isConstantFeature(Reference reference) {
if ( ! isSimpleFeature(reference)) return false;
return reference.name().equals("constant");
}
/**
* Returns the single argument of the given feature name, without any quotes,
* or empty if it is not a valid query, attribute or constant feature name
*/
public static Optional<String> argumentOf(String feature) {
Optional<Reference> reference = Reference.simple(feature);
if ( reference.isEmpty()) return Optional.empty();
if ( ! ( reference.get().name().equals("attribute") ||
reference.get().name().equals("constant") ||
reference.get().name().equals("query")))
return Optional.empty();
return Optional.of(reference.get().arguments().expressions().get(0).toString());
}
private static String quoteIfNecessary(String s) {
if (notNeedQuotes(s))
return s;
else
return "\"" + s + "\"";
}
static boolean notNeedQuotes(String s) {
if (s.isEmpty()) return false;
if ( ! isValidFirst(s.charAt(0))) return false;
for (int i = 1; i < s.length(); i++) {
if (!isValidAny(s.charAt(i))) return false;
}
return true;
}
private static boolean isValidFirst(char c) {
return (c == '_') || ((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z')) || ((c >= '0') && (c <= '9'));
}
} | class FeatureNames {
public static Reference asConstantFeature(String constantName) {
return Reference.simple("constant", quoteIfNecessary(constantName));
}
public static Reference asAttributeFeature(String attributeName) {
return Reference.simple("attribute", attributeName);
}
public static Reference asQueryFeature(String propertyName) {
return Reference.simple("query", quoteIfNecessary(propertyName));
}
/** Returns true if the given reference is an attribute, constant or query feature */
public static boolean isSimpleFeature(Reference reference) {
if ( ! reference.isSimple()) return false;
String name = reference.name();
return name.equals("attribute") || name.equals("constant") || name.equals("query");
}
/** Returns true if this is a constant */
public static boolean isConstantFeature(Reference reference) {
if ( ! isSimpleFeature(reference)) return false;
return reference.name().equals("constant");
}
/**
* Returns the single argument of the given feature name, without any quotes,
* or empty if it is not a valid query, attribute or constant feature name
*/
public static Optional<String> argumentOf(String feature) {
Optional<Reference> reference = Reference.simple(feature);
if ( reference.isEmpty()) return Optional.empty();
if ( ! ( reference.get().name().equals("attribute") ||
reference.get().name().equals("constant") ||
reference.get().name().equals("query")))
return Optional.empty();
return Optional.of(reference.get().arguments().expressions().get(0).toString());
}
private static String quoteIfNecessary(String s) {
if (notNeedQuotes(s))
return s;
else
return "\"" + s + "\"";
}
static boolean notNeedQuotes(String s) {
if (s.isEmpty()) return false;
if ( ! isValidFirst(s.charAt(0))) return false;
for (int i = 1; i < s.length(); i++) {
if (!isValidAny(s.charAt(i))) return false;
}
return true;
}
private static boolean isValidFirst(char c) {
return (c == '_') || ((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z')) || ((c >= '0') && (c <= '9'));
}
} |
Nested flat ternatry :neutral_face: | private RankProfile resolveInherited() {
if (inheritedName == null) return null;
return (getSearch() != null)
? (search.getDocument() != null)
? rankProfileRegistry.resolve(search.getDocument(), inheritedName)
: rankProfileRegistry.get(search.getName(), inheritedName)
: rankProfileRegistry.getGlobal(inheritedName);
} | return (getSearch() != null) | private RankProfile resolveInherited() {
if (inheritedName == null) return null;
return (getSearch() != null)
? resolveInherited(search)
: rankProfileRegistry.getGlobal(inheritedName);
} | class RankProfile implements Cloneable {
public final static String FIRST_PHASE = "firstphase";
public final static String SECOND_PHASE = "secondphase";
/** The search definition-unique name of this rank profile */
private final String name;
/** The search definition owning this profile, or null if global (owned by a model) */
private final ImmutableSearch search;
/** The model owning this profile if it is global, or null if it is owned by a search definition */
private final VespaModel model;
/** The name of the rank profile inherited by this */
private String inheritedName = null;
private RankProfile inherited = null;
/** The match settings of this profile */
private MatchPhaseSettings matchPhaseSettings = null;
/** The rank settings of this profile */
protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();
/** The ranking expression to be used for first phase */
private RankingExpressionFunction firstPhaseRanking = null;
/** The ranking expression to be used for second phase */
private RankingExpressionFunction secondPhaseRanking = null;
/** Number of hits to be reranked in second phase, -1 means use default */
private int rerankCount = -1;
/** Mysterious attribute */
private int keepRankCount = -1;
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
private Double termwiseLimit = null;
/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;
private Set<ReferenceNode> summaryFeatures;
private String inheritedSummaryFeatures;
private Set<ReferenceNode> rankFeatures;
/** The properties of this - a multimap */
private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();
private Boolean ignoreDefaultRankFeatures = null;
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();
private Map<String, RankingExpressionFunction> allFunctionsCached = null;
private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();
private Set<String> filterFields = new HashSet<>();
private final RankProfileRegistry rankProfileRegistry;
/** Constants in ranking expressions */
private Map<String, Value> constants = new HashMap<>();
private final TypeSettings attributeTypes = new TypeSettings();
private final TypeSettings queryFeatureTypes = new TypeSettings();
private List<ImmutableSDField> allFieldsList;
/** Global onnx models not tied to a search definition */
private final OnnxModels onnxModels;
/**
* Creates a new rank profile for a particular search definition
*
* @param name the name of the new profile
* @param search the search definition owning this profile
* @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} to use for storing
* and looking up rank profiles.
*/
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = Objects.requireNonNull(search, "search cannot be null");
this.model = null;
this.onnxModels = null;
this.rankProfileRegistry = rankProfileRegistry;
}
/**
* Creates a global rank profile
*
* @param name the name of the new profile
* @param model the model owning this profile
*/
public RankProfile(String name, VespaModel model, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = null;
this.model = Objects.requireNonNull(model, "model cannot be null");
this.rankProfileRegistry = rankProfileRegistry;
this.onnxModels = onnxModels;
}
public String getName() { return name; }
/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }
/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
return search != null ? search.applicationPackage() : model.applicationPackage();
}
/** Returns the ranking constants of the owner of this */
public RankingConstants rankingConstants() {
return search != null ? search.rankingConstants() : model.rankingConstants();
}
public Map<String, OnnxModel> onnxModels() {
return search != null ? search.onnxModels().asMap() : onnxModels.asMap();
}
private Stream<ImmutableSDField> allFields() {
if (search == null) return Stream.empty();
if (allFieldsList == null) {
allFieldsList = search.allFieldsList();
}
return allFieldsList.stream();
}
private Stream<ImmutableSDField> allImportedFields() {
return search != null ? search.allImportedFields() : Stream.empty();
}
/**
* Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
* definition
*/
public void setInherited(String inheritedName) {
this.inheritedName = inheritedName;
}
/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }
/** Returns the inherited rank profile, or null if there is none */
public RankProfile getInherited() {
if (inheritedName == null) return null;
if (inherited == null) {
inherited = resolveInherited();
if (inherited == null) {
throw new IllegalArgumentException("rank-profile '" + getName() + "' inherits '" + inheritedName +
"', but it does not exist anywhere in the inheritance of search '" +
((getSearch() != null) ? getSearch().getName() : " global rank profiles") + "'.");
}
}
return inherited;
}
/**
* Returns whether this profile inherits (directly or indirectly) the given profile
*
* @param name the profile name to compare this to.
* @return whether or not this inherits from the named profile.
*/
public boolean inherits(String name) {
RankProfile parent = getInherited();
while (parent != null) {
if (parent.getName().equals(name))
return true;
parent = parent.getInherited();
}
return false;
}
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
settings.checkValid();
this.matchPhaseSettings = settings;
}
public MatchPhaseSettings getMatchPhaseSettings() {
MatchPhaseSettings settings = this.matchPhaseSettings;
if (settings != null) return settings;
if (getInherited() != null) return getInherited().getMatchPhaseSettings();
return null;
}
public void addRankSetting(RankSetting rankSetting) {
rankSettings.add(rankSetting);
}
public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
addRankSetting(new RankSetting(fieldName, type, value));
}
/**
* Returns the a rank setting of a field, or null if there is no such rank setting in this profile
*
* @param field the field whose settings to return.
* @param type the type that the field is required to be.
* @return the rank setting found, or null.
*/
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
for (Iterator<RankSetting> i = declaredRankSettingIterator(); i.hasNext(); ) {
RankSetting setting = i.next();
if (setting.getFieldName().equals(field) &&
setting.getType().equals(type)) {
return setting;
}
}
return null;
}
/**
* Returns a rank setting of field or index, or null if there is no such rank setting in this profile or one it
* inherits
*
* @param field the field whose settings to return
* @param type the type that the field is required to be
* @return the rank setting found, or null
*/
public RankSetting getRankSetting(String field, RankSetting.Type type) {
RankSetting rankSetting = getDeclaredRankSetting(field, type);
if (rankSetting != null) return rankSetting;
if (getInherited() != null) return getInherited().getRankSetting(field, type);
return null;
}
/**
* Returns the rank settings in this rank profile
*
* @return an iterator for the declared rank setting
*/
public Iterator<RankSetting> declaredRankSettingIterator() {
return Collections.unmodifiableSet(rankSettings).iterator();
}
/**
* Returns all settings in this profile or any profile it inherits
*
* @return an iterator for all rank settings of this
*/
public Iterator<RankSetting> rankSettingIterator() {
return rankSettings().iterator();
}
/**
* Returns a snapshot of the rank settings of this and everything it inherits.
* Changes to the returned set will not be reflected in this rank profile.
*/
public Set<RankSetting> rankSettings() {
Set<RankSetting> allSettings = new LinkedHashSet<>(rankSettings);
RankProfile parent = getInherited();
if (parent != null)
allSettings.addAll(parent.rankSettings());
return allSettings;
}
public void addConstant(String name, Value value) {
if (value instanceof TensorValue) {
TensorType type = value.type();
if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
": Dense tensor dimensions must have a size");
}
constants.put(name, value.freeze());
}
public void addConstantTensor(String name, TensorValue value) {
addConstant(name, value);
}
/** Returns an unmodifiable view of the constants available in this */
public Map<String, Value> getConstants() {
if (constants.isEmpty())
return getInherited() != null ? getInherited().getConstants() : Collections.emptyMap();
if (getInherited() == null || getInherited().getConstants().isEmpty())
return Collections.unmodifiableMap(constants);
Map<String, Value> combinedConstants = new HashMap<>(getInherited().getConstants());
combinedConstants.putAll(constants);
return combinedConstants;
}
public void addAttributeType(String attributeName, String attributeType) {
attributeTypes.addType(attributeName, attributeType);
}
public Map<String, String> getAttributeTypes() {
return attributeTypes.getTypes();
}
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
queryFeatureTypes.addType(queryFeature, queryFeatureType);
}
public Map<String, String> getQueryFeatureTypes() {
return queryFeatureTypes.getTypes();
}
/**
* Returns the ranking expression to use by this. This expression must not be edited.
* Returns null if no expression is set.
*/
public RankingExpression getFirstPhaseRanking() {
RankingExpressionFunction function = getFirstPhase();
if (function == null) return null;
return function.function.getBody();
}
public RankingExpressionFunction getFirstPhase() {
if (firstPhaseRanking != null) return firstPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getFirstPhase();
return null;
}
void setFirstPhaseRanking(RankingExpression rankingExpression) {
this.firstPhaseRanking = new RankingExpressionFunction(new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression), false);
}
public void setFirstPhaseRanking(String expression) {
try {
firstPhaseRanking = new RankingExpressionFunction(parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression), false);
} catch (ParseException e) {
throw new IllegalArgumentException("Illegal first phase ranking function", e);
}
}
/**
* Returns the ranking expression to use by this. This expression must not be edited.
* Returns null if no expression is set.
*/
public RankingExpression getSecondPhaseRanking() {
RankingExpressionFunction function = getSecondPhase();
if (function == null) return null;
return function.function().getBody();
}
public RankingExpressionFunction getSecondPhase() {
if (secondPhaseRanking != null) return secondPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getSecondPhase();
return null;
}
public void setSecondPhaseRanking(String expression) {
try {
secondPhaseRanking = new RankingExpressionFunction(parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression), false);
}
catch (ParseException e) {
throw new IllegalArgumentException("Illegal second phase ranking function", e);
}
}
/** Returns a read-only view of the summary features to use in this profile. This is never null */
public Set<ReferenceNode> getSummaryFeatures() {
if (inheritedSummaryFeatures != null && summaryFeatures != null) {
Set<ReferenceNode> combined = new HashSet<>();
combined.addAll(getInherited().getSummaryFeatures());
combined.addAll(summaryFeatures);
return Collections.unmodifiableSet(combined);
}
if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
if (getInherited() != null) return getInherited().getSummaryFeatures();
return Set.of();
}
private void addSummaryFeature(ReferenceNode feature) {
if (summaryFeatures == null)
summaryFeatures = new LinkedHashSet<>();
summaryFeatures.add(feature);
}
/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addSummaryFeature(feature);
}
}
/**
* Sets the name this should inherit the summary features of.
* Without setting this, this will either have the summary features of the parent,
* or if summary features are set in this, only have the summary features in this.
* With this set the resulting summary features of this will be the superset of those defined in this and
* the final (with inheritance included) summary features of the given parent.
* The profile must be the profile which is directly inherited by this.
*
*/
public void setInheritedSummaryFeatures(String parentProfile) {
if ( ! parentProfile.equals(inheritedName))
throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
inheritedName + ", but attemtping to inherit '" + parentProfile);
this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
if (getInherited() != null) return getInherited().getRankFeatures();
return Collections.emptySet();
}
private void addRankFeature(ReferenceNode feature) {
if (rankFeatures == null)
rankFeatures = new LinkedHashSet<>();
rankFeatures.add(feature);
}
/**
* Adds the content of the given feature list to the internal list of rank features.
*
* @param features The features to add.
*/
public void addRankFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addRankFeature(feature);
}
}
/** Returns a read only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
    return Collections.unmodifiableList(getRankPropertyMap().values().stream()
                                                            .flatMap(List::stream)
                                                            .collect(Collectors.toList()));
}
/** Returns a read only map view of the rank properties to use in this profile. This is never null. */
public Map<String, List<RankProperty>> getRankPropertyMap() {
    if (rankProperties.isEmpty() && getInherited() == null) return Collections.emptyMap();
    if (rankProperties.isEmpty()) return getInherited().getRankPropertyMap();
    if (getInherited() == null) return Collections.unmodifiableMap(rankProperties);
    // Inherited properties first; local ones override on key collisions
    Map<String, List<RankProperty>> combined = new LinkedHashMap<>(getInherited().getRankPropertyMap());
    combined.putAll(rankProperties);
    return Collections.unmodifiableMap(combined);
}
/** Adds a rank property; multiple values may be added for the same name. */
public void addRankProperty(String name, String parameter) {
addRankProperty(new RankProperty(name, parameter));
}
private void addRankProperty(RankProperty rankProperty) {
// rankProperties is a multimap: each name maps to the ordered list of values added for it
rankProperties.computeIfAbsent(rankProperty.getName(), (String key) -> new ArrayList<>(1)).add(rankProperty);
}
@Override
public String toString() {
return "rank profile '" + getName() + "'";
}
/** Returns the rerank count, falling back to the inherited profile when unset here (< 0). */
public int getRerankCount() {
return (rerankCount < 0 && (getInherited() != null))
? getInherited().getRerankCount()
: rerankCount;
}
/** Returns the number of threads per search, falling back to the inherited profile when unset here (< 0). */
public int getNumThreadsPerSearch() {
return (numThreadsPerSearch < 0 && (getInherited() != null))
? getInherited().getNumThreadsPerSearch()
: numThreadsPerSearch;
}
public void setNumThreadsPerSearch(int numThreads) {
this.numThreadsPerSearch = numThreads;
}
/** Returns the min hits per thread, falling back to the inherited profile when unset here (< 0). */
public int getMinHitsPerThread() {
return (minHitsPerThread < 0 && (getInherited() != null))
? getInherited().getMinHitsPerThread()
: minHitsPerThread;
}
public void setMinHitsPerThread(int minHits) {
this.minHitsPerThread = minHits;
}
public void setNumSearchPartitions(int numSearchPartitions) {
this.numSearchPartitions = numSearchPartitions;
}
/** Returns the number of search partitions, falling back to the inherited profile when unset here (< 0). */
public int getNumSearchPartitions() {
return (numSearchPartitions < 0 && (getInherited() != null))
? getInherited().getNumSearchPartitions()
: numSearchPartitions;
}
/** Returns the termwise limit, falling back to the inherited profile when not set here. */
public OptionalDouble getTermwiseLimit() {
return ((termwiseLimit == null) && (getInherited() != null))
? getInherited().getTermwiseLimit()
: (termwiseLimit != null) ? OptionalDouble.of(termwiseLimit) : OptionalDouble.empty();
}
public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }
/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
this.rerankCount = rerankCount;
}
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}
/** Returns whether default rank features are ignored, consulting the inherited profile when unset here. */
public boolean getIgnoreDefaultRankFeatures() {
if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
return (getInherited() != null) && getInherited().getIgnoreDefaultRankFeatures();
}
/** Adds a function, parsing the given expression. Throws IllegalArgumentException on parse failure. */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
try {
addFunction(parseRankingExpression(name, arguments, expression), inline);
}
catch (ParseException e) {
throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
}
}
/** Adds a function and returns it */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline);
functions.put(function.getName(), rankingExpressionFunction);
// Invalidate the cached merged (own + inherited) function view
allFunctionsCached = null;
return rankingExpressionFunction;
}
/**
 * Use for rank profiles representing a model evaluation; it will assume
 * that an input is provided with the declared type (for the purpose of
 * type resolving).
 *
 * @throws IllegalArgumentException if the feature was already declared with a different type
 **/
public void addInputFeature(String name, TensorType declaredType) {
Reference ref = Reference.fromIdentifier(name);
if (inputFeatures.containsKey(ref)) {
TensorType hadType = inputFeatures.get(ref);
if (! declaredType.equals(hadType)) {
throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
hadType+" -> "+declaredType);
}
}
inputFeatures.put(ref, declaredType);
}
/** Returns the function with the given name in this or an inherited profile, or null if not present. */
public RankingExpressionFunction findFunction(String name) {
RankingExpressionFunction function = functions.get(name);
return ((function == null) && (getInherited() != null))
? getInherited().findFunction(name)
: function;
}
/** Returns an unmodifiable snapshot of the functions in this */
public Map<String, RankingExpressionFunction> getFunctions() {
if (needToUpdateFunctionCache()) {
allFunctionsCached = gatherAllFunctions();
}
return allFunctionsCached;
}
/** Merges this profile's functions with those of the inherited profile; local definitions win on name clashes. */
private Map<String, RankingExpressionFunction> gatherAllFunctions() {
    if (functions.isEmpty())
        return getInherited() == null ? Collections.emptyMap() : getInherited().getFunctions();
    Map<String, RankingExpressionFunction> merged = new LinkedHashMap<>();
    if (getInherited() != null)
        merged.putAll(getInherited().getFunctions());
    merged.putAll(functions);
    return Collections.unmodifiableMap(merged);
}
/** Returns whether the cached merged function view is stale here or anywhere up the inheritance chain. */
private boolean needToUpdateFunctionCache() {
    if (allFunctionsCached == null) return true;
    return getInherited() != null && getInherited().needToUpdateFunctionCache();
}
/** Returns the keep-rank-count set here, or the inherited value, or -1 when unset everywhere. */
public int getKeepRankCount() {
if (keepRankCount >= 0) return keepRankCount;
if (getInherited() != null) return getInherited().getKeepRankCount();
return -1;
}
public void setKeepRankCount(int rerankArraySize) {
this.keepRankCount = rerankArraySize;
}
/**
 * Returns the rank score drop limit set here, or the inherited value when unset.
 * -Double.MAX_VALUE means unset.
 */
public double getRankScoreDropLimit() {
    // Was written '>-' which obscures that this compares against the unary-negated sentinel
    if (rankScoreDropLimit > -Double.MAX_VALUE) return rankScoreDropLimit;
    if (getInherited() != null) return getInherited().getRankScoreDropLimit();
    return rankScoreDropLimit;
}
public void setRankScoreDropLimit(double rankScoreDropLimit) {
    this.rankScoreDropLimit = rankScoreDropLimit;
}
/** Returns the mutable set of filter fields declared directly in this profile (not inherited ones). */
public Set<String> filterFields() {
return filterFields;
}
/**
 * Returns all filter fields in this profile and any profile it inherits.
 *
 * @return the set of all filter fields
 */
public Set<String> allFilterFields() {
RankProfile parent = getInherited();
Set<String> retval = new LinkedHashSet<>();
if (parent != null) {
retval.addAll(parent.allFilterFields());
}
retval.addAll(filterFields());
return retval;
}
/**
 * Parses the given expression text (inline or a "file:" reference) into an ExpressionFunction.
 *
 * @throws ParseException if the expression is empty or cannot be parsed
 */
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
    if (expression.trim().length() == 0)
        throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");
    try (Reader rankingExpressionReader = openRankingExpressionReader(name, expression.trim())) {
        return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
    }
    catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
        ParseException exception = new ParseException("Could not parse ranking expression '" + expression.trim() +
                                                      "' in " + getName()+ ", " + name + ".");
        throw (ParseException)exception.initCause(e);
    }
    catch (IOException e) {
        // Chain the cause so the underlying I/O failure is not lost
        throw new RuntimeException("IOException parsing ranking expression '" + name + "'", e);
    }
}
/** Strips the "file:" prefix and appends the ranking-expression file suffix when it is missing. */
private static String extractFileName(String expression) {
    String fileName = expression.substring("file:".length()).trim();
    return fileName.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX)
           ? fileName
           : fileName + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
}
/**
 * Opens a reader over the given expression: inline expressions are read directly,
 * while "file:" expressions are resolved through the owning search definition.
 *
 * @throws IllegalArgumentException if the referenced file is in a subdirectory (unsupported)
 */
private Reader openRankingExpressionReader(String expName, String expression) {
if (!expression.startsWith("file:")) return new StringReader(expression);
String fileName = extractFileName(expression);
File file = new File(fileName);
if (!file.isAbsolute() && file.getPath().contains("/"))
throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
"' in subdirectory, which is not supported.");
return search.getRankingExpression(fileName);
}
/** Shallow clones this */
@Override
public RankProfile clone() {
try {
RankProfile clone = (RankProfile)super.clone();
// Copy the mutable collections so the clone can be modified independently;
// the contained elements themselves are shared (shallow clone)
clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
clone.matchPhaseSettings = this.matchPhaseSettings;
clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
clone.functions = new LinkedHashMap<>(this.functions);
clone.allFunctionsCached = null;
clone.filterFields = new HashSet<>(this.filterFields);
clone.constants = new HashMap<>(this.constants);
return clone;
}
catch (CloneNotSupportedException e) {
throw new RuntimeException("Won't happen", e);
}
}
/**
 * Returns a copy of this where the content is optimized for execution.
 * Compiled profiles should never be modified.
 *
 * @throws IllegalArgumentException if this profile is invalid
 */
public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
try {
RankProfile compiled = this.clone();
compiled.compileThis(queryProfiles, importedModels);
return compiled;
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
}
}
/** Compiles this in place: transforms and resolves the first/second phase expressions and all functions. */
private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
checkNameCollisions(getFunctions(), getConstants());
ExpressionTransforms expressionTransforms = new ExpressionTransforms();
Map<Reference, TensorType> featureTypes = collectFeatureTypes();
// Inline functions are compiled first so they can be substituted into the other expressions
Map<String, RankingExpressionFunction> inlineFunctions =
compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);
firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
allFunctionsCached = null;
}
/** Throws IllegalArgumentException if any name is used both for a function and a constant. */
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
    for (String functionName : functions.keySet()) {
        if (constants.containsKey(functionName))
            throw new IllegalArgumentException("Cannot have both a constant and function named '" +
                                               functionName + "'");
    }
}
/** Returns the subset of this profile's functions which are marked inline. */
private Map<String, RankingExpressionFunction> getInlineFunctions() {
    Map<String, RankingExpressionFunction> inlineFunctions = new HashMap<>();
    for (Map.Entry<String, RankingExpressionFunction> entry : getFunctions().entrySet()) {
        if (entry.getValue().inline())
            inlineFunctions.put(entry.getKey(), entry.getValue());
    }
    return inlineFunctions;
}
/**
 * Compiles all functions supplied by the given supplier. The supplier is re-invoked on each
 * iteration because compiling one function may add further rank properties/functions.
 */
private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
QueryProfileRegistry queryProfiles,
Map<Reference, TensorType> featureTypes,
ImportedMlModels importedModels,
Map<String, RankingExpressionFunction> inlineFunctions,
ExpressionTransforms expressionTransforms) {
Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
Map.Entry<String, RankingExpressionFunction> entry;
// Loop until no uncompiled function remains; the function set may grow while compiling
while (null != (entry = findUncompiledFunction(functions.get(), compiledFunctions.keySet()))) {
RankingExpressionFunction rankingExpressionFunction = entry.getValue();
RankingExpressionFunction compiled = compile(rankingExpressionFunction, queryProfiles, featureTypes,
importedModels, getConstants(), inlineFunctions, expressionTransforms);
compiledFunctions.put(entry.getKey(), compiled);
}
return compiledFunctions;
}
/** Returns some function whose name is not in compiledFunctionNames, or null when all are compiled. */
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
Set<String> compiledFunctionNames) {
for (Map.Entry<String, RankingExpressionFunction> entry : functions.entrySet()) {
if ( ! compiledFunctionNames.contains(entry.getKey()))
return entry;
}
return null;
}
/**
 * Transforms the given function's expression in this profile's context, adding any rank
 * properties produced by the transforms to this profile. Returns null for a null function.
 */
private RankingExpressionFunction compile(RankingExpressionFunction function,
QueryProfileRegistry queryProfiles,
Map<Reference, TensorType> featureTypes,
ImportedMlModels importedModels,
Map<String, Value> constants,
Map<String, RankingExpressionFunction> inlineFunctions,
ExpressionTransforms expressionTransforms) {
if (function == null) return null;
RankProfileTransformContext context = new RankProfileTransformContext(this,
queryProfiles,
featureTypes,
importedModels,
constants,
inlineFunctions);
RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
// Transforms may emit rank properties (e.g. for constants); record them on this profile
for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
addRankProperty(rankProperty.getKey(), rankProperty.getValue());
}
return function.withExpression(expression);
}
/**
 * Creates a context containing the type information of all constants, attributes and query profiles
 * referable from this rank profile.
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
return typeContext(queryProfiles, collectFeatureTypes());
}
/** Creates a type context using an empty query profile registry. */
public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }
/** Collects the declared input feature types plus the attribute feature types of all (local and imported) fields. */
private Map<Reference, TensorType> collectFeatureTypes() {
    // Copy constructor replaces the verbose forEach((k, v) -> put(k, v)) re-implementation
    Map<Reference, TensorType> featureTypes = new HashMap<>(inputFeatures);
    allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    return featureTypes;
}
/** Creates a type context from the given feature types, adding constants, query feature types and ONNX model output types. */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
.map(RankingExpressionFunction::function)
.collect(Collectors.toList()),
featureTypes);
// Register the types of local constants and of the owner's ranking constants
getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));
// Register query feature types declared in query profile types; when the same feature is
// declared in several profiles, generalize dimensionwise or fail if incompatible
for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
for (FieldDescription field : queryProfileType.declaredFields().values()) {
TensorType type = field.getType().asTensorType();
Optional<Reference> feature = Reference.simple(field.getName());
if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;
TensorType existingType = context.getType(feature.get());
if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
" with type " + field.getType().asTensorType() +
", but this is already defined in another query profile with type " +
context.getType(feature.get())));
context.setType(feature.get(), type);
}
}
// Register the output types of all ONNX models, including each model's default output
for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
String modelName = entry.getKey();
OnnxModel model = entry.getValue();
Arguments args = new Arguments(new ReferenceNode(modelName));
Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);
TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
context.setType(new Reference("onnxModel", args, null), defaultOutputType);
for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
}
}
return context;
}
/** Resolves the types of all inputs of the given ONNX model which can be determined from the context. */
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
Map<String, TensorType> inputTypes = new HashMap<>();
for (String onnxInputName : model.getInputMap().keySet()) {
resolveOnnxInputType(onnxInputName, model, context).ifPresent(type -> inputTypes.put(onnxInputName, type));
}
return inputTypes;
}
/**
 * Resolves the type of a single ONNX model input: a simple feature reference is looked up
 * directly (rankingExpression(name) wrappers are unwrapped to the named function first);
 * otherwise the source is resolved as a function by name.
 */
private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
String source = model.getInputMap().get(onnxInputName);
if (source != null) {
Optional<Reference> reference = Reference.simple(source);
if (reference.isPresent()) {
if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
source = reference.get().simpleArgument().get();
} else {
return Optional.of(context.getType(reference.get()));
}
}
ExpressionFunction func = context.getFunction(source);
if (func != null) {
return Optional.of(func.getBody().type(context));
}
}
return Optional.empty();
}
/** Registers attribute(name) feature types for every attribute of the given field. */
private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
    Attribute primaryAttribute = field.getAttribute();
    field.getAttributes().forEach((attributeName, candidate) -> {
        // The field's primary attribute is exposed under the field name itself
        String exposedName = (candidate == primaryAttribute) ? field.getName() : attributeName;
        featureTypes.put(FeatureNames.asAttributeFeature(exposedName),
                         candidate.tensorType().orElse(TensorType.empty));
    });
}
/**
 * A rank setting. The identity of a rank setting is its field name and type (not value).
 * A rank setting is immutable.
 */
public static class RankSetting implements Serializable {
private final String fieldName;
private final Type type;
/** The rank value */
private final Object value;
/** The kinds of rank settings which may be applied to a field or index. */
public enum Type {
RANKTYPE("rank-type"),
LITERALBOOST("literal-boost"),
WEIGHT("weight"),
PREFERBITVECTOR("preferbitvector",true);
private final String name;
/** True if this setting really pertains to an index, not a field within an index */
private final boolean isIndexLevel;
Type(String name) {
this(name,false);
}
Type(String name,boolean isIndexLevel) {
this.name = name;
this.isIndexLevel=isIndexLevel;
}
/** True if this setting really pertains to an index, not a field within an index */
public boolean isIndexLevel() { return isIndexLevel; }
/** Returns the name of this type */
public String getName() {
return name;
}
public String toString() {
return "type: " + name;
}
}
public RankSetting(String fieldName, RankSetting.Type type, Object value) {
this.fieldName = fieldName;
this.type = type;
this.value = value;
}
public String getFieldName() { return fieldName; }
public Type getType() { return type; }
public Object getValue() { return value; }
/** Returns the value as an int, or a negative value if it is not an integer */
public int getIntValue() {
if (value instanceof Integer) {
return ((Integer)value);
}
else {
return -1;
}
}
@Override
public int hashCode() {
// Consistent with equals: identity is (fieldName, type) only, not value
return fieldName.hashCode() + 17 * type.hashCode();
}
@Override
public boolean equals(Object object) {
if (!(object instanceof RankSetting)) {
return false;
}
RankSetting other = (RankSetting)object;
return
fieldName.equals(other.fieldName) &&
type.equals(other.type);
}
@Override
public String toString() {
return type + " setting " + fieldName + ": " + value;
}
}
/** A rank property. Rank properties are Value Objects */
public static class RankProperty implements Serializable {
private final String name;
private final String value;
/** Creates an immutable (name, value) rank property pair. */
public RankProperty(String name, String value) {
this.name = name;
this.value = value;
}
public String getName() { return name; }
public String getValue() { return value; }
@Override
public int hashCode() {
// Consistent with equals: both name and value contribute to identity
return name.hashCode() + 17 * value.hashCode();
}
@Override
public boolean equals(Object object) {
if (! (object instanceof RankProperty)) return false;
RankProperty other=(RankProperty)object;
return (other.name.equals(this.name) && other.value.equals(this.value));
}
@Override
public String toString() {
return name + " = " + value;
}
}
/** A function in a rank profile */
public static class RankingExpressionFunction {
private ExpressionFunction function;
/** True if this should be inlined into calling expressions. Useful for very cheap functions. */
private final boolean inline;
RankingExpressionFunction(ExpressionFunction function, boolean inline) {
this.function = function;
this.inline = inline;
}
/** Replaces the wrapped function with one having the given return type. */
public void setReturnType(TensorType type) {
this.function = function.withReturnType(type);
}
public ExpressionFunction function() { return function; }
/** Returns whether this should be inlined; only functions without arguments are inlined. */
public boolean inline() {
return inline && function.arguments().isEmpty();
}
/** Returns a copy of this whose function body is replaced by the given expression. */
RankingExpressionFunction withExpression(RankingExpression expression) {
return new RankingExpressionFunction(function.withBody(expression), inline);
}
@Override
public String toString() {
return "function " + function;
}
}
/** Diversity settings used by the match phase. */
public static final class DiversitySettings {
private String attribute = null;
private int minGroups = 0;
private double cutoffFactor = 10;
private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;
public void setAttribute(String value) { attribute = value; }
public void setMinGroups(int value) { minGroups = value; }
public void setCutoffFactor(double value) { cutoffFactor = value; }
public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { cutoffStrategy = strategy; }
public String getAttribute() { return attribute; }
public int getMinGroups() { return minGroups; }
public double getCutoffFactor() { return cutoffFactor; }
public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }
/** Validates these settings; throws IllegalArgumentException when a value is missing or out of range. */
void checkValid() {
if (attribute == null || attribute.isEmpty()) {
throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
}
if (minGroups <= 0) {
throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
}
if (cutoffFactor < 1.0) {
throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
}
}
}
/** Settings for the match phase of query evaluation. */
public static class MatchPhaseSettings {
private String attribute = null;
private boolean ascending = false;
private int maxHits = 0;
private double maxFilterCoverage = 0.2;
private DiversitySettings diversity = null;
private double evaluationPoint = 0.20;
private double prePostFilterTippingPoint = 1.0;
/** Sets the diversity settings after validating them. */
public void setDiversity(DiversitySettings value) {
value.checkValid();
diversity = value;
}
public void setAscending(boolean value) { ascending = value; }
public void setAttribute(String value) { attribute = value; }
public void setMaxHits(int value) { maxHits = value; }
public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }
public boolean getAscending() { return ascending; }
public String getAttribute() { return attribute; }
public int getMaxHits() { return maxHits; }
public double getMaxFilterCoverage() { return maxFilterCoverage; }
public DiversitySettings getDiversity() { return diversity; }
public double getEvaluationPoint() { return evaluationPoint; }
public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }
/** Validates these settings; throws IllegalArgumentException when attribute or max-hits is missing. */
public void checkValid() {
if (attribute == null) {
throw new IllegalArgumentException("match-phase did not set any attribute");
}
if (! (maxHits > 0)) {
throw new IllegalArgumentException("match-phase did not set max-hits > 0");
}
}
}
/** A mutable collection of name-to-type declarations. */
public static class TypeSettings {
private final Map<String, String> types = new HashMap<>();
/** Declares (or redeclares) the type of the given name. */
void addType(String name, String type) {
types.put(name, type);
}
/** Returns an unmodifiable view of the declared types. */
public Map<String, String> getTypes() {
return Collections.unmodifiableMap(types);
}
}
} | class RankProfile implements Cloneable {
private final static Logger log = Logger.getLogger(RankProfile.class.getName());
public final static String FIRST_PHASE = "firstphase";
public final static String SECOND_PHASE = "secondphase";
/** The search definition-unique name of this rank profile */
private final String name;
/** The search definition owning this profile, or null if global (owned by a model) */
private final ImmutableSearch search;
/** The model owning this profile if it is global, or null if it is owned by a search definition */
private final VespaModel model;
/** The name of the rank profile inherited by this */
private String inheritedName = null;
/** The resolved inherited profile, computed lazily from inheritedName */
private RankProfile inherited = null;
/** The match settings of this profile */
private MatchPhaseSettings matchPhaseSettings = null;
/** The rank settings of this profile */
protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();
/** The ranking expression to be used for first phase */
private RankingExpressionFunction firstPhaseRanking = null;
/** The ranking expression to be used for second phase */
private RankingExpressionFunction secondPhaseRanking = null;
/** Number of hits to be reranked in second phase, -1 means use default */
private int rerankCount = -1;
/** Mysterious attribute */
private int keepRankCount = -1;
// For the three settings below, a negative value means unset (use inherited or default)
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
/** Termwise limit, or null when unset (use inherited or default) */
private Double termwiseLimit = null;
/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;
/** Summary features declared directly in this profile, or null when none are declared here */
private Set<ReferenceNode> summaryFeatures;
/** Name of the parent profile whose summary features this also includes, or null */
private String inheritedSummaryFeatures;
/** Rank features declared directly in this profile, or null when none are declared here */
private Set<ReferenceNode> rankFeatures;
/** The properties of this - a multimap */
private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();
private Boolean ignoreDefaultRankFeatures = null;
/** The functions declared directly in this profile, by name */
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();
/** Cached merged view of own and inherited functions; null when stale */
private Map<String, RankingExpressionFunction> allFunctionsCached = null;
/** Declared input features with their types, for model-evaluation profiles */
private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();
private Set<String> filterFields = new HashSet<>();
private final RankProfileRegistry rankProfileRegistry;
/** Constants in ranking expressions */
private Map<String, Value> constants = new HashMap<>();
private final TypeSettings attributeTypes = new TypeSettings();
private final TypeSettings queryFeatureTypes = new TypeSettings();
/** Cached list of all fields of the owning search definition */
private List<ImmutableSDField> allFieldsList;
/** Global onnx models not tied to a search definition */
private final OnnxModels onnxModels;
/**
 * Creates a new rank profile for a particular search definition
 *
 * @param name the name of the new profile
 * @param search the search definition owning this profile
 * @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} to use for storing
 *                            and looking up rank profiles.
 */
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = Objects.requireNonNull(search, "search cannot be null");
this.model = null;
this.onnxModels = null;
this.rankProfileRegistry = rankProfileRegistry;
}
/**
 * Creates a global rank profile
 *
 * @param name the name of the new profile
 * @param model the model owning this profile
 * @param rankProfileRegistry the registry to use for storing and looking up rank profiles
 * @param onnxModels the global onnx models not tied to a search definition
 */
public RankProfile(String name, VespaModel model, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = null;
this.model = Objects.requireNonNull(model, "model cannot be null");
this.rankProfileRegistry = rankProfileRegistry;
this.onnxModels = onnxModels;
}
/** Returns the search definition-unique name of this profile. */
public String getName() { return name; }
/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }
/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
return search != null ? search.applicationPackage() : model.applicationPackage();
}
/** Returns the ranking constants of the owner of this */
public RankingConstants rankingConstants() {
return search != null ? search.rankingConstants() : model.rankingConstants();
}
/** Returns the ONNX models of the owner of this. */
public Map<String, OnnxModel> onnxModels() {
return search != null ? search.onnxModels().asMap() : onnxModels.asMap();
}
/** Returns a stream over all fields of the owning search definition; the field list is cached after the first call. */
private Stream<ImmutableSDField> allFields() {
if (search == null) return Stream.empty();
if (allFieldsList == null) {
allFieldsList = search.allFieldsList();
}
return allFieldsList.stream();
}
private Stream<ImmutableSDField> allImportedFields() {
return search != null ? search.allImportedFields() : Stream.empty();
}
/**
 * Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
 * definition
 */
public void setInherited(String inheritedName) {
this.inheritedName = inheritedName;
}
/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }
/**
 * Returns the inherited rank profile, or null if there is none.
 * The parent is resolved lazily on first access, validated for inheritance cycles,
 * and cached. A missing parent either throws or is only logged, depending on the
 * enforce-rank-profile-inheritance feature flag.
 */
private RankProfile getInherited() {
    if (inheritedName == null) return null;
    if (inherited == null) {
        inherited = resolveInherited();
        if (inherited == null) {
            String msg = "rank-profile '" + getName() + "' inherits '" + inheritedName +
                         "', but it does not exist anywhere in the inheritance of search '" +
                         ((getSearch() != null) ? getSearch().getName() : " global rank profiles") + "'.";
            // Guard against NPE: global rank profiles have no search definition (search == null),
            // as the message construction above already acknowledges
            if (search != null && search.getDeployProperties().featureFlags().enforceRankProfileInheritance()) {
                throw new IllegalArgumentException(msg);
            } else {
                log.warning(msg);
            }
        } else {
            List<String> children = new ArrayList<>();
            children.add(createFullyQualifiedName());
            verifyNoInheritanceCycle(children, inherited);
        }
    }
    return inherited;
}
/** Returns "searchName.profileName", or just the profile name for global profiles. */
private String createFullyQualifiedName() {
return (search != null)
? (search.getName() + "." + getName())
: getName();
}
/**
 * Walks the inheritance chain recursively and throws if the starting profile's fully
 * qualified name reappears, which means the inheritance graph contains a cycle.
 */
private void verifyNoInheritanceCycle(List<String> children, RankProfile parent) {
children.add(parent.createFullyQualifiedName());
String root = children.get(0);
if (root.equals(parent.createFullyQualifiedName())) {
throw new IllegalArgumentException("There is a cycle in the inheritance for rank-profile '" + root + "' = " + children);
}
if (parent.getInherited() != null) {
verifyNoInheritanceCycle(children, parent.getInherited());
}
}
/**
 * Resolves the inherited profile within the given search: the profile is looked up in the
 * document type (when the inherited name equals this profile's own name, the inherited
 * document types are searched first), falling back to lookup by search name.
 */
private RankProfile resolveInherited(ImmutableSearch search) {
SDDocumentType documentType = search.getDocument();
if (documentType != null) {
if (name.equals(inheritedName)) {
// This profile shadows one with the same name defined in a parent document type
for (SDDocumentType baseType : documentType.getInheritedTypes()) {
RankProfile resolvedFromBase = rankProfileRegistry.resolve(baseType, inheritedName);
if (resolvedFromBase != null) return resolvedFromBase;
}
}
return rankProfileRegistry.resolve(documentType, inheritedName);
}
return rankProfileRegistry.get(search.getName(), inheritedName);
}
/**
 * Returns whether this profile inherits (directly or indirectly) the given profile.
 *
 * @param name the profile name to compare this to
 * @return whether or not this inherits from the named profile
 */
public boolean inherits(String name) {
    for (RankProfile ancestor = getInherited(); ancestor != null; ancestor = ancestor.getInherited()) {
        if (ancestor.getName().equals(name))
            return true;
    }
    return false;
}
/** Sets the match phase settings of this, validating them first. */
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
settings.checkValid();
this.matchPhaseSettings = settings;
}
/** Returns the match phase settings of this or of the inherited profile, or null when unset. */
public MatchPhaseSettings getMatchPhaseSettings() {
MatchPhaseSettings settings = this.matchPhaseSettings;
if (settings != null) return settings;
if (getInherited() != null) return getInherited().getMatchPhaseSettings();
return null;
}
/** Adds a rank setting to this profile. */
public void addRankSetting(RankSetting rankSetting) {
rankSettings.add(rankSetting);
}
/** Adds a rank setting of the given field, type and value to this profile. */
public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
addRankSetting(new RankSetting(fieldName, type, value));
}
/**
 * Returns the rank setting of a field, or null if there is no such rank setting in this profile
 *
 * @param field the field whose settings to return.
 * @param type the type that the field is required to be.
 * @return the rank setting found, or null.
 */
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
for (Iterator<RankSetting> i = declaredRankSettingIterator(); i.hasNext(); ) {
RankSetting setting = i.next();
if (setting.getFieldName().equals(field) &&
setting.getType().equals(type)) {
return setting;
}
}
return null;
}
/**
 * Returns a rank setting of field or index, or null if there is no such rank setting in this profile or one it
 * inherits
 *
 * @param field the field whose settings to return
 * @param type the type that the field is required to be
 * @return the rank setting found, or null
 */
public RankSetting getRankSetting(String field, RankSetting.Type type) {
RankSetting rankSetting = getDeclaredRankSetting(field, type);
if (rankSetting != null) return rankSetting;
if (getInherited() != null) return getInherited().getRankSetting(field, type);
return null;
}
/**
* Returns the rank settings in this rank profile
*
* @return an iterator for the declared rank setting
*/
public Iterator<RankSetting> declaredRankSettingIterator() {
return Collections.unmodifiableSet(rankSettings).iterator();
}
/**
* Returns all settings in this profile or any profile it inherits
*
* @return an iterator for all rank settings of this
*/
public Iterator<RankSetting> rankSettingIterator() {
return rankSettings().iterator();
}
/**
* Returns a snapshot of the rank settings of this and everything it inherits.
* Changes to the returned set will not be reflected in this rank profile.
*/
public Set<RankSetting> rankSettings() {
Set<RankSetting> allSettings = new LinkedHashSet<>(rankSettings);
RankProfile parent = getInherited();
if (parent != null)
allSettings.addAll(parent.rankSettings());
return allSettings;
}
public void addConstant(String name, Value value) {
if (value instanceof TensorValue) {
TensorType type = value.type();
if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
": Dense tensor dimensions must have a size");
}
constants.put(name, value.freeze());
}
public void addConstantTensor(String name, TensorValue value) {
addConstant(name, value);
}
/** Returns an unmodifiable view of the constants available in this */
public Map<String, Value> getConstants() {
if (constants.isEmpty())
return getInherited() != null ? getInherited().getConstants() : Collections.emptyMap();
if (getInherited() == null || getInherited().getConstants().isEmpty())
return Collections.unmodifiableMap(constants);
Map<String, Value> combinedConstants = new HashMap<>(getInherited().getConstants());
combinedConstants.putAll(constants);
return combinedConstants;
}
public void addAttributeType(String attributeName, String attributeType) {
attributeTypes.addType(attributeName, attributeType);
}
public Map<String, String> getAttributeTypes() {
return attributeTypes.getTypes();
}
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
queryFeatureTypes.addType(queryFeature, queryFeatureType);
}
public Map<String, String> getQueryFeatureTypes() {
return queryFeatureTypes.getTypes();
}
/**
* Returns the ranking expression to use by this. This expression must not be edited.
* Returns null if no expression is set.
*/
public RankingExpression getFirstPhaseRanking() {
RankingExpressionFunction function = getFirstPhase();
if (function == null) return null;
return function.function.getBody();
}
public RankingExpressionFunction getFirstPhase() {
if (firstPhaseRanking != null) return firstPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getFirstPhase();
return null;
}
void setFirstPhaseRanking(RankingExpression rankingExpression) {
this.firstPhaseRanking = new RankingExpressionFunction(new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression), false);
}
public void setFirstPhaseRanking(String expression) {
try {
firstPhaseRanking = new RankingExpressionFunction(parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression), false);
} catch (ParseException e) {
throw new IllegalArgumentException("Illegal first phase ranking function", e);
}
}
/**
* Returns the ranking expression to use by this. This expression must not be edited.
* Returns null if no expression is set.
*/
public RankingExpression getSecondPhaseRanking() {
RankingExpressionFunction function = getSecondPhase();
if (function == null) return null;
return function.function().getBody();
}
public RankingExpressionFunction getSecondPhase() {
if (secondPhaseRanking != null) return secondPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getSecondPhase();
return null;
}
public void setSecondPhaseRanking(String expression) {
try {
secondPhaseRanking = new RankingExpressionFunction(parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression), false);
}
catch (ParseException e) {
throw new IllegalArgumentException("Illegal second phase ranking function", e);
}
}
/** Returns a read-only view of the summary features to use in this profile. This is never null */
public Set<ReferenceNode> getSummaryFeatures() {
if (inheritedSummaryFeatures != null && summaryFeatures != null) {
Set<ReferenceNode> combined = new HashSet<>();
combined.addAll(getInherited().getSummaryFeatures());
combined.addAll(summaryFeatures);
return Collections.unmodifiableSet(combined);
}
if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
if (getInherited() != null) return getInherited().getSummaryFeatures();
return Set.of();
}
private void addSummaryFeature(ReferenceNode feature) {
if (summaryFeatures == null)
summaryFeatures = new LinkedHashSet<>();
summaryFeatures.add(feature);
}
/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addSummaryFeature(feature);
}
}
/**
* Sets the name this should inherit the summary features of.
* Without setting this, this will either have the summary features of the parent,
* or if summary features are set in this, only have the summary features in this.
* With this set the resulting summary features of this will be the superset of those defined in this and
* the final (with inheritance included) summary features of the given parent.
* The profile must be the profile which is directly inherited by this.
*
*/
public void setInheritedSummaryFeatures(String parentProfile) {
if ( ! parentProfile.equals(inheritedName))
throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
inheritedName + ", but attemtping to inherit '" + parentProfile);
this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
if (getInherited() != null) return getInherited().getRankFeatures();
return Collections.emptySet();
}
private void addRankFeature(ReferenceNode feature) {
if (rankFeatures == null)
rankFeatures = new LinkedHashSet<>();
rankFeatures.add(feature);
}
/**
* Adds the content of the given feature list to the internal list of rank features.
*
* @param features The features to add.
*/
public void addRankFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addRankFeature(feature);
}
}
/** Returns a read only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
List<RankProperty> properties = new ArrayList<>();
for (List<RankProperty> propertyList : getRankPropertyMap().values()) {
properties.addAll(propertyList);
}
return Collections.unmodifiableList(properties);
}
/** Returns a read only map view of the rank properties to use in this profile. This is never null. */
public Map<String, List<RankProperty>> getRankPropertyMap() {
if (rankProperties.size() == 0 && getInherited() == null) return Collections.emptyMap();
if (rankProperties.size() == 0) return getInherited().getRankPropertyMap();
if (getInherited() == null) return Collections.unmodifiableMap(rankProperties);
Map<String, List<RankProperty>> combined = new LinkedHashMap<>(getInherited().getRankPropertyMap());
combined.putAll(rankProperties);
return Collections.unmodifiableMap(combined);
}
public void addRankProperty(String name, String parameter) {
addRankProperty(new RankProperty(name, parameter));
}
private void addRankProperty(RankProperty rankProperty) {
rankProperties.computeIfAbsent(rankProperty.getName(), (String key) -> new ArrayList<>(1)).add(rankProperty);
}
@Override
public String toString() {
return "rank profile '" + getName() + "'";
}
public int getRerankCount() {
return (rerankCount < 0 && (getInherited() != null))
? getInherited().getRerankCount()
: rerankCount;
}
public int getNumThreadsPerSearch() {
return (numThreadsPerSearch < 0 && (getInherited() != null))
? getInherited().getNumThreadsPerSearch()
: numThreadsPerSearch;
}
public void setNumThreadsPerSearch(int numThreads) {
this.numThreadsPerSearch = numThreads;
}
public int getMinHitsPerThread() {
return (minHitsPerThread < 0 && (getInherited() != null))
? getInherited().getMinHitsPerThread()
: minHitsPerThread;
}
public void setMinHitsPerThread(int minHits) {
this.minHitsPerThread = minHits;
}
public void setNumSearchPartitions(int numSearchPartitions) {
this.numSearchPartitions = numSearchPartitions;
}
public int getNumSearchPartitions() {
return (numSearchPartitions < 0 && (getInherited() != null))
? getInherited().getNumSearchPartitions()
: numSearchPartitions;
}
public OptionalDouble getTermwiseLimit() {
return ((termwiseLimit == null) && (getInherited() != null))
? getInherited().getTermwiseLimit()
: (termwiseLimit != null) ? OptionalDouble.of(termwiseLimit) : OptionalDouble.empty();
}
public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }
/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
this.rerankCount = rerankCount;
}
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}
public boolean getIgnoreDefaultRankFeatures() {
if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
return (getInherited() != null) && getInherited().getIgnoreDefaultRankFeatures();
}
/** Adds a function */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
try {
addFunction(parseRankingExpression(name, arguments, expression), inline);
}
catch (ParseException e) {
throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
}
}
/** Adds a function and returns it */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline);
functions.put(function.getName(), rankingExpressionFunction);
allFunctionsCached = null;
return rankingExpressionFunction;
}
/**
* Use for rank profiles representing a model evaluation; it will assume
* that a input is provided with the declared type (for the purpose of
* type resolving).
**/
public void addInputFeature(String name, TensorType declaredType) {
Reference ref = Reference.fromIdentifier(name);
if (inputFeatures.containsKey(ref)) {
TensorType hadType = inputFeatures.get(ref);
if (! declaredType.equals(hadType)) {
throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
hadType+" -> "+declaredType);
}
}
inputFeatures.put(ref, declaredType);
}
public RankingExpressionFunction findFunction(String name) {
RankingExpressionFunction function = functions.get(name);
return ((function == null) && (getInherited() != null))
? getInherited().findFunction(name)
: function;
}
/** Returns an unmodifiable snapshot of the functions in this */
public Map<String, RankingExpressionFunction> getFunctions() {
if (needToUpdateFunctionCache()) {
allFunctionsCached = gatherAllFunctions();
}
return allFunctionsCached;
}
private Map<String, RankingExpressionFunction> gatherAllFunctions() {
if (functions.isEmpty() && getInherited() == null) return Collections.emptyMap();
if (functions.isEmpty()) return getInherited().getFunctions();
if (getInherited() == null) return Collections.unmodifiableMap(new LinkedHashMap<>(functions));
Map<String, RankingExpressionFunction> allFunctions = new LinkedHashMap<>(getInherited().getFunctions());
allFunctions.putAll(functions);
return Collections.unmodifiableMap(allFunctions);
}
private boolean needToUpdateFunctionCache() {
if (getInherited() != null)
return (allFunctionsCached == null) || getInherited().needToUpdateFunctionCache();
return allFunctionsCached == null;
}
public int getKeepRankCount() {
if (keepRankCount >= 0) return keepRankCount;
if (getInherited() != null) return getInherited().getKeepRankCount();
return -1;
}
public void setKeepRankCount(int rerankArraySize) {
this.keepRankCount = rerankArraySize;
}
public double getRankScoreDropLimit() {
if (rankScoreDropLimit >- Double.MAX_VALUE) return rankScoreDropLimit;
if (getInherited() != null) return getInherited().getRankScoreDropLimit();
return rankScoreDropLimit;
}
public void setRankScoreDropLimit(double rankScoreDropLimit) {
this.rankScoreDropLimit = rankScoreDropLimit;
}
public Set<String> filterFields() {
return filterFields;
}
/**
* Returns all filter fields in this profile and any profile it inherits.
*
* @return the set of all filter fields
*/
public Set<String> allFilterFields() {
RankProfile parent = getInherited();
Set<String> retval = new LinkedHashSet<>();
if (parent != null) {
retval.addAll(parent.allFilterFields());
}
retval.addAll(filterFields());
return retval;
}
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
if (expression.trim().length() == 0)
throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");
try (Reader rankingExpressionReader = openRankingExpressionReader(name, expression.trim())) {
return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
}
catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
ParseException exception = new ParseException("Could not parse ranking expression '" + expression.trim() +
"' in " + getName()+ ", " + name + ".");
throw (ParseException)exception.initCause(e);
}
catch (IOException e) {
throw new RuntimeException("IOException parsing ranking expression '" + name + "'");
}
}
private static String extractFileName(String expression) {
String fileName = expression.substring("file:".length()).trim();
if ( ! fileName.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX))
fileName = fileName + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
return fileName;
}
private Reader openRankingExpressionReader(String expName, String expression) {
if (!expression.startsWith("file:")) return new StringReader(expression);
String fileName = extractFileName(expression);
File file = new File(fileName);
if (!file.isAbsolute() && file.getPath().contains("/"))
throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
"' in subdirectory, which is not supported.");
return search.getRankingExpression(fileName);
}
/** Shallow clones this */
@Override
public RankProfile clone() {
try {
RankProfile clone = (RankProfile)super.clone();
clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
clone.matchPhaseSettings = this.matchPhaseSettings;
clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
clone.functions = new LinkedHashMap<>(this.functions);
clone.allFunctionsCached = null;
clone.filterFields = new HashSet<>(this.filterFields);
clone.constants = new HashMap<>(this.constants);
return clone;
}
catch (CloneNotSupportedException e) {
throw new RuntimeException("Won't happen", e);
}
}
/**
* Returns a copy of this where the content is optimized for execution.
* Compiled profiles should never be modified.
*/
public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
try {
RankProfile compiled = this.clone();
compiled.compileThis(queryProfiles, importedModels);
return compiled;
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
}
}
private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
checkNameCollisions(getFunctions(), getConstants());
ExpressionTransforms expressionTransforms = new ExpressionTransforms();
Map<Reference, TensorType> featureTypes = collectFeatureTypes();
Map<String, RankingExpressionFunction> inlineFunctions =
compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);
firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
allFunctionsCached = null;
}
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
for (Map.Entry<String, RankingExpressionFunction> functionEntry : functions.entrySet()) {
if (constants.containsKey(functionEntry.getKey()))
throw new IllegalArgumentException("Cannot have both a constant and function named '" +
functionEntry.getKey() + "'");
}
}
private Map<String, RankingExpressionFunction> getInlineFunctions() {
return getFunctions().entrySet().stream().filter(x -> x.getValue().inline())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
QueryProfileRegistry queryProfiles,
Map<Reference, TensorType> featureTypes,
ImportedMlModels importedModels,
Map<String, RankingExpressionFunction> inlineFunctions,
ExpressionTransforms expressionTransforms) {
Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
Map.Entry<String, RankingExpressionFunction> entry;
while (null != (entry = findUncompiledFunction(functions.get(), compiledFunctions.keySet()))) {
RankingExpressionFunction rankingExpressionFunction = entry.getValue();
RankingExpressionFunction compiled = compile(rankingExpressionFunction, queryProfiles, featureTypes,
importedModels, getConstants(), inlineFunctions, expressionTransforms);
compiledFunctions.put(entry.getKey(), compiled);
}
return compiledFunctions;
}
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
Set<String> compiledFunctionNames) {
for (Map.Entry<String, RankingExpressionFunction> entry : functions.entrySet()) {
if ( ! compiledFunctionNames.contains(entry.getKey()))
return entry;
}
return null;
}
private RankingExpressionFunction compile(RankingExpressionFunction function,
QueryProfileRegistry queryProfiles,
Map<Reference, TensorType> featureTypes,
ImportedMlModels importedModels,
Map<String, Value> constants,
Map<String, RankingExpressionFunction> inlineFunctions,
ExpressionTransforms expressionTransforms) {
if (function == null) return null;
RankProfileTransformContext context = new RankProfileTransformContext(this,
queryProfiles,
featureTypes,
importedModels,
constants,
inlineFunctions);
RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
addRankProperty(rankProperty.getKey(), rankProperty.getValue());
}
return function.withExpression(expression);
}
/**
* Creates a context containing the type information of all constants, attributes and query profiles
* referable from this rank profile.
*/
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
return typeContext(queryProfiles, collectFeatureTypes());
}
public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }
private Map<Reference, TensorType> collectFeatureTypes() {
Map<Reference, TensorType> featureTypes = new HashMap<>();
inputFeatures.forEach((k, v) -> featureTypes.put(k, v));
allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
return featureTypes;
}
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
.map(RankingExpressionFunction::function)
.collect(Collectors.toList()),
featureTypes);
getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));
for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
for (FieldDescription field : queryProfileType.declaredFields().values()) {
TensorType type = field.getType().asTensorType();
Optional<Reference> feature = Reference.simple(field.getName());
if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;
TensorType existingType = context.getType(feature.get());
if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
" with type " + field.getType().asTensorType() +
", but this is already defined in another query profile with type " +
context.getType(feature.get())));
context.setType(feature.get(), type);
}
}
for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
String modelName = entry.getKey();
OnnxModel model = entry.getValue();
Arguments args = new Arguments(new ReferenceNode(modelName));
Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);
TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
context.setType(new Reference("onnxModel", args, null), defaultOutputType);
for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
}
}
return context;
}
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
Map<String, TensorType> inputTypes = new HashMap<>();
for (String onnxInputName : model.getInputMap().keySet()) {
resolveOnnxInputType(onnxInputName, model, context).ifPresent(type -> inputTypes.put(onnxInputName, type));
}
return inputTypes;
}
private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
String source = model.getInputMap().get(onnxInputName);
if (source != null) {
Optional<Reference> reference = Reference.simple(source);
if (reference.isPresent()) {
if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
source = reference.get().simpleArgument().get();
} else {
return Optional.of(context.getType(reference.get()));
}
}
ExpressionFunction func = context.getFunction(source);
if (func != null) {
return Optional.of(func.getBody().type(context));
}
}
return Optional.empty();
}
private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
Attribute attribute = field.getAttribute();
field.getAttributes().forEach((k, a) -> {
String name = k;
if (attribute == a)
name = field.getName();
featureTypes.put(FeatureNames.asAttributeFeature(name),
a.tensorType().orElse(TensorType.empty));
});
}
/**
* A rank setting. The identity of a rank setting is its field name and type (not value).
* A rank setting is immutable.
*/
public static class RankSetting implements Serializable {
private final String fieldName;
private final Type type;
/** The rank value */
private final Object value;
public enum Type {
RANKTYPE("rank-type"),
LITERALBOOST("literal-boost"),
WEIGHT("weight"),
PREFERBITVECTOR("preferbitvector",true);
private final String name;
/** True if this setting really pertains to an index, not a field within an index */
private final boolean isIndexLevel;
Type(String name) {
this(name,false);
}
Type(String name,boolean isIndexLevel) {
this.name = name;
this.isIndexLevel=isIndexLevel;
}
/** True if this setting really pertains to an index, not a field within an index */
public boolean isIndexLevel() { return isIndexLevel; }
/** Returns the name of this type */
public String getName() {
return name;
}
public String toString() {
return "type: " + name;
}
}
public RankSetting(String fieldName, RankSetting.Type type, Object value) {
this.fieldName = fieldName;
this.type = type;
this.value = value;
}
public String getFieldName() { return fieldName; }
public Type getType() { return type; }
public Object getValue() { return value; }
/** Returns the value as an int, or a negative value if it is not an integer */
public int getIntValue() {
if (value instanceof Integer) {
return ((Integer)value);
}
else {
return -1;
}
}
@Override
public int hashCode() {
return fieldName.hashCode() + 17 * type.hashCode();
}
@Override
public boolean equals(Object object) {
if (!(object instanceof RankSetting)) {
return false;
}
RankSetting other = (RankSetting)object;
return
fieldName.equals(other.fieldName) &&
type.equals(other.type);
}
@Override
public String toString() {
return type + " setting " + fieldName + ": " + value;
}
}
/** A rank property. Rank properties are Value Objects */
public static class RankProperty implements Serializable {
private final String name;
private final String value;
public RankProperty(String name, String value) {
this.name = name;
this.value = value;
}
public String getName() { return name; }
public String getValue() { return value; }
@Override
public int hashCode() {
return name.hashCode() + 17 * value.hashCode();
}
@Override
public boolean equals(Object object) {
if (! (object instanceof RankProperty)) return false;
RankProperty other=(RankProperty)object;
return (other.name.equals(this.name) && other.value.equals(this.value));
}
@Override
public String toString() {
return name + " = " + value;
}
}
/** A function in a rank profile */
public static class RankingExpressionFunction {
private ExpressionFunction function;
/** True if this should be inlined into calling expressions. Useful for very cheap functions. */
private final boolean inline;
RankingExpressionFunction(ExpressionFunction function, boolean inline) {
this.function = function;
this.inline = inline;
}
public void setReturnType(TensorType type) {
this.function = function.withReturnType(type);
}
public ExpressionFunction function() { return function; }
public boolean inline() {
return inline && function.arguments().isEmpty();
}
RankingExpressionFunction withExpression(RankingExpression expression) {
return new RankingExpressionFunction(function.withBody(expression), inline);
}
@Override
public String toString() {
return "function " + function;
}
}
public static final class DiversitySettings {
private String attribute = null;
private int minGroups = 0;
private double cutoffFactor = 10;
private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;
public void setAttribute(String value) { attribute = value; }
public void setMinGroups(int value) { minGroups = value; }
public void setCutoffFactor(double value) { cutoffFactor = value; }
public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { cutoffStrategy = strategy; }
public String getAttribute() { return attribute; }
public int getMinGroups() { return minGroups; }
public double getCutoffFactor() { return cutoffFactor; }
public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }
void checkValid() {
if (attribute == null || attribute.isEmpty()) {
throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
}
if (minGroups <= 0) {
throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
}
if (cutoffFactor < 1.0) {
throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
}
}
}
public static class MatchPhaseSettings {
private String attribute = null;
private boolean ascending = false;
private int maxHits = 0;
private double maxFilterCoverage = 0.2;
private DiversitySettings diversity = null;
private double evaluationPoint = 0.20;
private double prePostFilterTippingPoint = 1.0;
public void setDiversity(DiversitySettings value) {
value.checkValid();
diversity = value;
}
public void setAscending(boolean value) { ascending = value; }
public void setAttribute(String value) { attribute = value; }
public void setMaxHits(int value) { maxHits = value; }
public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }
public boolean getAscending() { return ascending; }
public String getAttribute() { return attribute; }
public int getMaxHits() { return maxHits; }
public double getMaxFilterCoverage() { return maxFilterCoverage; }
public DiversitySettings getDiversity() { return diversity; }
public double getEvaluationPoint() { return evaluationPoint; }
public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }
public void checkValid() {
if (attribute == null) {
throw new IllegalArgumentException("match-phase did not set any attribute");
}
if (! (maxHits > 0)) {
throw new IllegalArgumentException("match-phase did not set max-hits > 0");
}
}
}
public static class TypeSettings {
private final Map<String, String> types = new HashMap<>();
void addType(String name, String type) {
types.put(name, type);
}
public Map<String, String> getTypes() {
return Collections.unmodifiableMap(types);
}
}
} |
Not flat anymore. | private RankProfile resolveInherited() {
if (inheritedName == null) return null;
return (getSearch() != null)
? (search.getDocument() != null)
? rankProfileRegistry.resolve(search.getDocument(), inheritedName)
: rankProfileRegistry.get(search.getName(), inheritedName)
: rankProfileRegistry.getGlobal(inheritedName);
} | return (getSearch() != null) | private RankProfile resolveInherited() {
if (inheritedName == null) return null;
return (getSearch() != null)
? resolveInherited(search)
: rankProfileRegistry.getGlobal(inheritedName);
} | class RankProfile implements Cloneable {
public final static String FIRST_PHASE = "firstphase";
public final static String SECOND_PHASE = "secondphase";
/** The search definition-unique name of this rank profile */
private final String name;
/** The search definition owning this profile, or null if global (owned by a model) */
private final ImmutableSearch search;
/** The model owning this profile if it is global, or null if it is owned by a search definition */
private final VespaModel model;
/** The name of the rank profile inherited by this */
private String inheritedName = null;
/** Lazily resolved parent profile; cached by getInherited() */
private RankProfile inherited = null;
/** The match settings of this profile */
private MatchPhaseSettings matchPhaseSettings = null;
/** The rank settings of this profile */
protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();
/** The ranking expression to be used for first phase */
private RankingExpressionFunction firstPhaseRanking = null;
/** The ranking expression to be used for second phase */
private RankingExpressionFunction secondPhaseRanking = null;
/** Number of hits to be reranked in second phase, -1 means use default */
private int rerankCount = -1;
/** Max hits the backend keeps ranked per node; -1 means use default — TODO confirm exact backend semantics */
private int keepRankCount = -1;
/** Threads used per search; -1 means inherit or use default */
private int numThreadsPerSearch = -1;
/** Minimum hits per thread; -1 means inherit or use default */
private int minHitsPerThread = -1;
/** Number of search partitions; -1 means inherit or use default */
private int numSearchPartitions = -1;
/** Termwise evaluation limit; null means inherit or unset */
private Double termwiseLimit = null;
/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;
/** Summary features declared in this profile, or null if none declared here */
private Set<ReferenceNode> summaryFeatures;
/** Name of the parent profile whose summary features this also includes, or null */
private String inheritedSummaryFeatures;
/** Rank features declared in this profile, or null if none declared here */
private Set<ReferenceNode> rankFeatures;
/** The properties of this - a multimap */
private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();
private Boolean ignoreDefaultRankFeatures = null;
/** Functions declared directly in this profile, by name */
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();
/** Cache of this profile's + inherited functions; invalidated on mutation */
private Map<String, RankingExpressionFunction> allFunctionsCached = null;
/** Declared input features (for model-evaluation profiles), by reference */
private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();
/** Names of fields marked as filters in this profile only (see allFilterFields) */
private Set<String> filterFields = new HashSet<>();
private final RankProfileRegistry rankProfileRegistry;
/** Constants in ranking expressions */
private Map<String, Value> constants = new HashMap<>();
/** Declared attribute types, for type resolution */
private final TypeSettings attributeTypes = new TypeSettings();
/** Declared query feature types, for type resolution */
private final TypeSettings queryFeatureTypes = new TypeSettings();
/** Cached result of search.allFieldsList(); populated lazily by allFields() */
private List<ImmutableSDField> allFieldsList;
/** Global onnx models not tied to a search definition */
private final OnnxModels onnxModels;
/**
 * Creates a rank profile owned by a search definition.
 *
 * @param name                the name of the new profile; never null
 * @param search              the search definition owning this profile; never null
 * @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} used for storing
 *                            and looking up rank profiles
 */
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
    this.rankProfileRegistry = rankProfileRegistry;
    this.name = Objects.requireNonNull(name, "name cannot be null");
    this.search = Objects.requireNonNull(search, "search cannot be null");
    // A search-owned profile has no owning model and no global onnx models
    this.model = null;
    this.onnxModels = null;
}
/**
 * Creates a global rank profile, owned by a model rather than a search definition.
 *
 * @param name       the name of the new profile; never null
 * @param model      the model owning this profile; never null
 * @param onnxModels global onnx models not tied to a search definition
 */
public RankProfile(String name, VespaModel model, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
    this.rankProfileRegistry = rankProfileRegistry;
    this.name = Objects.requireNonNull(name, "name cannot be null");
    this.model = Objects.requireNonNull(model, "model cannot be null");
    // A global profile has no owning search definition
    this.search = null;
    this.onnxModels = onnxModels;
}
/** Returns the search definition-unique name of this rank profile */
public String getName() { return name; }
/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }
/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
    return search != null ? search.applicationPackage() : model.applicationPackage();
}
/** Returns the ranking constants of the owner of this */
public RankingConstants rankingConstants() {
    return search != null ? search.rankingConstants() : model.rankingConstants();
}
/** Returns the onnx models of the owner of this: the search definition's if owned, else the global ones */
public Map<String, OnnxModel> onnxModels() {
    return search != null ? search.onnxModels().asMap() : onnxModels.asMap();
}
/** Streams all fields of the owning search, caching the field list; empty for global profiles. */
private Stream<ImmutableSDField> allFields() {
    if (search == null) return Stream.empty();
    if (allFieldsList == null)
        allFieldsList = search.allFieldsList();
    return allFieldsList.stream();
}

/** Streams all imported fields of the owning search; empty for global profiles. */
private Stream<ImmutableSDField> allImportedFields() {
    if (search == null) return Stream.empty();
    return search.allImportedFields();
}
/**
 * Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
 * definition
 */
public void setInherited(String inheritedName) {
    this.inheritedName = inheritedName;
}
/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }
/**
 * Returns the inherited rank profile, resolving and caching it on first access,
 * or null if this inherits nothing.
 *
 * @throws IllegalArgumentException if an inherited name is set but cannot be resolved
 */
public RankProfile getInherited() {
    if (inheritedName == null) return null;
    if (inherited != null) return inherited;

    inherited = resolveInherited();
    if (inherited == null) {
        String scope = (getSearch() != null) ? getSearch().getName() : " global rank profiles";
        throw new IllegalArgumentException("rank-profile '" + getName() + "' inherits '" + inheritedName +
                                           "', but it does not exist anywhere in the inheritance of search '" +
                                           scope + "'.");
    }
    return inherited;
}
/**
 * Returns whether this profile inherits (directly or transitively) the named profile.
 *
 * @param name the profile name to look for among the ancestors of this
 * @return true if some ancestor has the given name
 */
public boolean inherits(String name) {
    for (RankProfile ancestor = getInherited(); ancestor != null; ancestor = ancestor.getInherited()) {
        if (ancestor.getName().equals(name))
            return true;
    }
    return false;
}
/** Sets the match phase settings of this, after validating them. */
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
    settings.checkValid();
    this.matchPhaseSettings = settings;
}

/** Returns the match phase settings of this or the nearest ancestor having them, or null if none. */
public MatchPhaseSettings getMatchPhaseSettings() {
    if (matchPhaseSettings != null) return matchPhaseSettings;
    RankProfile parent = getInherited();
    return (parent != null) ? parent.getMatchPhaseSettings() : null;
}
/** Adds a rank setting to this profile */
public void addRankSetting(RankSetting rankSetting) {
    rankSettings.add(rankSetting);
}
/** Adds a rank setting for the given field, type and value to this profile */
public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
    addRankSetting(new RankSetting(fieldName, type, value));
}
/**
 * Returns the rank setting declared directly in this profile for a field and type,
 * or null if this profile declares no such setting (inherited settings are not consulted).
 *
 * @param field the field whose setting to return
 * @param type  the setting type to match
 * @return the matching setting, or null
 */
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
    for (RankSetting setting : rankSettings) {
        if (setting.getFieldName().equals(field) && setting.getType().equals(type))
            return setting;
    }
    return null;
}
/**
 * Returns a rank setting of a field or index, searching this profile first and then
 * the inheritance chain, or null if no profile declares it.
 *
 * @param field the field whose setting to return
 * @param type  the setting type to match
 * @return the setting found, or null
 */
public RankSetting getRankSetting(String field, RankSetting.Type type) {
    RankSetting declared = getDeclaredRankSetting(field, type);
    if (declared != null) return declared;
    RankProfile parent = getInherited();
    return (parent != null) ? parent.getRankSetting(field, type) : null;
}
/**
 * Returns the rank settings declared directly in this rank profile (not inherited ones)
 *
 * @return an iterator over the declared rank settings, backed by an unmodifiable view
 */
public Iterator<RankSetting> declaredRankSettingIterator() {
    return Collections.unmodifiableSet(rankSettings).iterator();
}
/**
 * Returns all settings in this profile or any profile it inherits
 *
 * @return an iterator for all rank settings of this
 */
public Iterator<RankSetting> rankSettingIterator() {
    return rankSettings().iterator();
}
/**
 * Returns a snapshot of the rank settings of this and everything it inherits.
 * Changes to the returned set will not be reflected in this rank profile.
 */
public Set<RankSetting> rankSettings() {
    Set<RankSetting> snapshot = new LinkedHashSet<>(rankSettings);
    RankProfile parent = getInherited();
    if (parent == null) return snapshot;
    snapshot.addAll(parent.rankSettings());
    return snapshot;
}
/**
 * Adds a named constant (frozen before storing) to this profile.
 *
 * @throws IllegalArgumentException if the value is a dense tensor with an unsized dimension
 */
public void addConstant(String name, Value value) {
    if (value instanceof TensorValue) {
        TensorType type = value.type();
        for (TensorType.Dimension dimension : type.dimensions()) {
            if (dimension.isIndexed() && dimension.size().isEmpty())
                throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
                                                   ": Dense tensor dimensions must have a size");
        }
    }
    constants.put(name, value.freeze());
}

/** Adds a tensor constant; equivalent to {@link #addConstant}. */
public void addConstantTensor(String name, TensorValue value) {
    addConstant(name, value);
}
/**
 * Returns an unmodifiable view of the constants available in this, including those
 * inherited from the parent profile. Constants declared in this profile shadow
 * inherited constants with the same name.
 */
public Map<String, Value> getConstants() {
    if (constants.isEmpty())
        return getInherited() != null ? getInherited().getConstants() : Collections.emptyMap();
    if (getInherited() == null || getInherited().getConstants().isEmpty())
        return Collections.unmodifiableMap(constants);
    // LinkedHashMap (as in gatherAllFunctions/getRankPropertyMap) for deterministic iteration order
    Map<String, Value> combinedConstants = new LinkedHashMap<>(getInherited().getConstants());
    combinedConstants.putAll(constants);
    // Wrap unmodifiable for consistency with the other return paths
    return Collections.unmodifiableMap(combinedConstants);
}
/** Declares the type of an attribute, by name */
public void addAttributeType(String attributeName, String attributeType) {
    attributeTypes.addType(attributeName, attributeType);
}
/** Returns an unmodifiable map from attribute name to declared type string */
public Map<String, String> getAttributeTypes() {
    return attributeTypes.getTypes();
}
/** Declares the type of a query feature, by name */
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
    queryFeatureTypes.addType(queryFeature, queryFeatureType);
}
/** Returns an unmodifiable map from query feature name to declared type string */
public Map<String, String> getQueryFeatureTypes() {
    return queryFeatureTypes.getTypes();
}
/**
 * Returns the first-phase ranking expression to use by this, from this profile or the
 * nearest ancestor declaring one. The returned expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getFirstPhaseRanking() {
    RankingExpressionFunction function = getFirstPhase();
    if (function == null) return null;
    // Use the function() accessor, consistent with getSecondPhaseRanking (was direct field access)
    return function.function().getBody();
}
/** Returns the first-phase function of this or the nearest ancestor declaring one, or null if none. */
public RankingExpressionFunction getFirstPhase() {
    if (firstPhaseRanking != null) return firstPhaseRanking;
    RankProfile parent = getInherited();
    return (parent != null) ? parent.getFirstPhase() : null;
}
/** Sets the first-phase ranking from an already parsed expression */
void setFirstPhaseRanking(RankingExpression rankingExpression) {
    this.firstPhaseRanking = new RankingExpressionFunction(new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression), false);
}
/**
 * Sets the first-phase ranking from expression source text.
 *
 * @throws IllegalArgumentException if the expression does not parse
 */
public void setFirstPhaseRanking(String expression) {
    try {
        firstPhaseRanking = new RankingExpressionFunction(parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression), false);
    } catch (ParseException e) {
        throw new IllegalArgumentException("Illegal first phase ranking function", e);
    }
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getSecondPhaseRanking() {
    RankingExpressionFunction function = getSecondPhase();
    if (function == null) return null;
    return function.function().getBody();
}
/** Returns the second-phase function of this or the nearest ancestor declaring one, or null if none */
public RankingExpressionFunction getSecondPhase() {
    if (secondPhaseRanking != null) return secondPhaseRanking;
    RankProfile inherited = getInherited();
    if (inherited != null) return inherited.getSecondPhase();
    return null;
}
/**
 * Sets the second-phase ranking from expression source text.
 *
 * @throws IllegalArgumentException if the expression does not parse
 */
public void setSecondPhaseRanking(String expression) {
    try {
        secondPhaseRanking = new RankingExpressionFunction(parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression), false);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Illegal second phase ranking function", e);
    }
}
/**
 * Returns a read-only view of the summary features to use in this profile. This is never null.
 * If this profile both declares summary features and inherits its parent's (see
 * {@link #setInheritedSummaryFeatures}), the union of the two is returned.
 */
public Set<ReferenceNode> getSummaryFeatures() {
    if (inheritedSummaryFeatures != null && summaryFeatures != null) {
        // LinkedHashSet for deterministic order, consistent with addSummaryFeature (was HashSet)
        Set<ReferenceNode> combined = new LinkedHashSet<>();
        combined.addAll(getInherited().getSummaryFeatures());
        combined.addAll(summaryFeatures);
        return Collections.unmodifiableSet(combined);
    }
    if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
    if (getInherited() != null) return getInherited().getSummaryFeatures();
    return Set.of();
}
/** Adds a single summary feature, creating the (insertion-ordered) set on first use */
private void addSummaryFeature(ReferenceNode feature) {
    if (summaryFeatures == null)
        summaryFeatures = new LinkedHashSet<>();
    summaryFeatures.add(feature);
}
/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
    for (ReferenceNode feature : features) {
        addSummaryFeature(feature);
    }
}
/**
 * Sets the name of the profile whose summary features this should inherit.
 * Without setting this, this will either have the summary features of the parent,
 * or if summary features are set in this, only have the summary features in this.
 * With this set the resulting summary features of this will be the superset of those defined in this and
 * the final (with inheritance included) summary features of the given parent.
 * The profile must be the profile which is directly inherited by this.
 *
 * @throws IllegalArgumentException if the given profile is not the directly inherited one
 */
public void setInheritedSummaryFeatures(String parentProfile) {
    if ( ! parentProfile.equals(inheritedName))
        // Fixed message: typo "attemtping" and unbalanced quotes
        throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
                                           inheritedName + "', but attempting to inherit '" + parentProfile + "'");
    this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
    if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
    if (getInherited() != null) return getInherited().getRankFeatures();
    return Collections.emptySet();
}
/** Adds a single rank feature, creating the (insertion-ordered) set on first use */
private void addRankFeature(ReferenceNode feature) {
    if (rankFeatures == null)
        rankFeatures = new LinkedHashSet<>();
    rankFeatures.add(feature);
}
/**
 * Adds the content of the given feature list to the internal list of rank features.
 *
 * @param features The features to add.
 */
public void addRankFeatures(FeatureList features) {
    for (ReferenceNode feature : features) {
        addRankFeature(feature);
    }
}
/** Returns a read-only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
    List<RankProperty> flattened = getRankPropertyMap().values().stream()
                                                       .flatMap(List::stream)
                                                       .collect(Collectors.toList());
    return Collections.unmodifiableList(flattened);
}
/**
 * Returns a read only map view of the rank properties to use in this profile. This is never null.
 * Properties declared in this profile shadow inherited properties with the same key.
 */
public Map<String, List<RankProperty>> getRankPropertyMap() {
    if (rankProperties.size() == 0 && getInherited() == null) return Collections.emptyMap();
    if (rankProperties.size() == 0) return getInherited().getRankPropertyMap();
    if (getInherited() == null) return Collections.unmodifiableMap(rankProperties);
    // Combine: inherited first, then own entries override per key
    Map<String, List<RankProperty>> combined = new LinkedHashMap<>(getInherited().getRankPropertyMap());
    combined.putAll(rankProperties);
    return Collections.unmodifiableMap(combined);
}
/** Adds a rank property with the given name and value to this profile */
public void addRankProperty(String name, String parameter) {
    addRankProperty(new RankProperty(name, parameter));
}
/** Appends a rank property to the multimap; multiple values per name are kept in insertion order */
private void addRankProperty(RankProperty rankProperty) {
    rankProperties.computeIfAbsent(rankProperty.getName(), (String key) -> new ArrayList<>(1)).add(rankProperty);
}
@Override
public String toString() {
    return "rank profile '" + getName() + "'";
}
/** Returns the rerank count of this or the inherited profile; -1 means use default */
public int getRerankCount() {
    return (rerankCount < 0 && (getInherited() != null))
    ? getInherited().getRerankCount()
    : rerankCount;
}
/** Returns the threads per search of this or the inherited profile; -1 means use default */
public int getNumThreadsPerSearch() {
    return (numThreadsPerSearch < 0 && (getInherited() != null))
    ? getInherited().getNumThreadsPerSearch()
    : numThreadsPerSearch;
}
public void setNumThreadsPerSearch(int numThreads) {
    this.numThreadsPerSearch = numThreads;
}
/** Returns the min hits per thread of this or the inherited profile; -1 means use default */
public int getMinHitsPerThread() {
    return (minHitsPerThread < 0 && (getInherited() != null))
    ? getInherited().getMinHitsPerThread()
    : minHitsPerThread;
}
public void setMinHitsPerThread(int minHits) {
    this.minHitsPerThread = minHits;
}
public void setNumSearchPartitions(int numSearchPartitions) {
    this.numSearchPartitions = numSearchPartitions;
}
/** Returns the search partition count of this or the inherited profile; -1 means use default */
public int getNumSearchPartitions() {
    return (numSearchPartitions < 0 && (getInherited() != null))
    ? getInherited().getNumSearchPartitions()
    : numSearchPartitions;
}
/** Returns the termwise limit of this or the inherited profile, or empty if unset anywhere */
public OptionalDouble getTermwiseLimit() {
    return ((termwiseLimit == null) && (getInherited() != null))
    ? getInherited().getTermwiseLimit()
    : (termwiseLimit != null) ? OptionalDouble.of(termwiseLimit) : OptionalDouble.empty();
}
public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }
/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
    this.rerankCount = rerankCount;
}
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
    this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}
/** Returns whether to ignore default rank features, falling back to the inherited profile when unset */
public boolean getIgnoreDefaultRankFeatures() {
    if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
    return (getInherited() != null) && getInherited().getIgnoreDefaultRankFeatures();
}
/**
 * Adds a function from expression source text.
 *
 * @throws IllegalArgumentException if the expression does not parse
 */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
    try {
        addFunction(parseRankingExpression(name, arguments, expression), inline);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
    }
}
/** Adds a function and returns it. Invalidates the cached combined function map. */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
    RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline);
    functions.put(function.getName(), rankingExpressionFunction);
    allFunctionsCached = null;
    return rankingExpressionFunction;
}
/**
 * Use for rank profiles representing a model evaluation; it will assume
 * that an input is provided with the declared type (for the purpose of
 * type resolving).
 *
 * @throws IllegalArgumentException if the feature was already declared with a different type
 */
public void addInputFeature(String name, TensorType declaredType) {
    Reference ref = Reference.fromIdentifier(name);
    if (inputFeatures.containsKey(ref) && ! declaredType.equals(inputFeatures.get(ref))) {
        throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
                                           inputFeatures.get(ref)+" -> "+declaredType);
    }
    inputFeatures.put(ref, declaredType);
}
/** Returns the named function from this or the nearest ancestor declaring it, or null if none. */
public RankingExpressionFunction findFunction(String name) {
    RankingExpressionFunction function = functions.get(name);
    if (function != null) return function;
    RankProfile parent = getInherited();
    return (parent != null) ? parent.findFunction(name) : null;
}
/** Returns an unmodifiable snapshot of the functions in this (including inherited), cached until mutated */
public Map<String, RankingExpressionFunction> getFunctions() {
    if (needToUpdateFunctionCache()) {
        allFunctionsCached = gatherAllFunctions();
    }
    return allFunctionsCached;
}
/** Combines own and inherited functions; own functions shadow inherited ones with the same name */
private Map<String, RankingExpressionFunction> gatherAllFunctions() {
    if (functions.isEmpty() && getInherited() == null) return Collections.emptyMap();
    if (functions.isEmpty()) return getInherited().getFunctions();
    if (getInherited() == null) return Collections.unmodifiableMap(new LinkedHashMap<>(functions));
    Map<String, RankingExpressionFunction> allFunctions = new LinkedHashMap<>(getInherited().getFunctions());
    allFunctions.putAll(functions);
    return Collections.unmodifiableMap(allFunctions);
}
/** Returns whether the cached function map is stale anywhere in the inheritance chain */
private boolean needToUpdateFunctionCache() {
    if (getInherited() != null)
        return (allFunctionsCached == null) || getInherited().needToUpdateFunctionCache();
    return allFunctionsCached == null;
}
/** Returns the keep rank count of this or the nearest ancestor setting it, or -1 if unset. */
public int getKeepRankCount() {
    if (keepRankCount >= 0) return keepRankCount;
    RankProfile parent = getInherited();
    return (parent != null) ? parent.getKeepRankCount() : -1;
}

public void setKeepRankCount(int rerankArraySize) {
    this.keepRankCount = rerankArraySize;
}

/** Returns the rank score drop limit of this or the nearest ancestor setting it; -Double.MAX_VALUE if unset. */
public double getRankScoreDropLimit() {
    if (rankScoreDropLimit > -Double.MAX_VALUE) return rankScoreDropLimit;
    RankProfile parent = getInherited();
    return (parent != null) ? parent.getRankScoreDropLimit() : rankScoreDropLimit;
}

public void setRankScoreDropLimit(double rankScoreDropLimit) {
    this.rankScoreDropLimit = rankScoreDropLimit;
}
/** Returns the (mutable) set of filter fields declared directly in this profile. */
public Set<String> filterFields() {
    return filterFields;
}

/**
 * Returns all filter fields in this profile and any profile it inherits.
 *
 * @return the set of all filter fields
 */
public Set<String> allFilterFields() {
    Set<String> all = new LinkedHashSet<>();
    RankProfile parent = getInherited();
    if (parent != null)
        all.addAll(parent.allFilterFields());
    all.addAll(filterFields());
    return all;
}
/**
 * Parses a ranking expression from source text (or from a file if the text starts with "file:").
 *
 * @throws ParseException if the expression is empty or does not parse
 */
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
    if (expression.trim().length() == 0)
        throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");
    try (Reader rankingExpressionReader = openRankingExpressionReader(name, expression.trim())) {
        return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
    }
    catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
        ParseException exception = new ParseException("Could not parse ranking expression '" + expression.trim() +
                                                      "' in " + getName()+ ", " + name + ".");
        throw (ParseException)exception.initCause(e);
    }
    catch (IOException e) {
        // Preserve the cause (was dropped before), so the underlying I/O failure is diagnosable
        throw new RuntimeException("IOException parsing ranking expression '" + name + "'", e);
    }
}
/** Strips the "file:" prefix and ensures the rank-expression file suffix is present. */
private static String extractFileName(String expression) {
    String fileName = expression.substring("file:".length()).trim();
    return fileName.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX)
           ? fileName
           : fileName + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
}
/**
 * Returns a reader over the expression text, or over the referenced file when the
 * expression starts with "file:". File references must not be in a subdirectory.
 * NOTE(review): uses 'search' unconditionally on the file path — presumably file-based
 * expressions only occur in search-owned profiles; a global profile would NPE here. Verify.
 */
private Reader openRankingExpressionReader(String expName, String expression) {
    if (!expression.startsWith("file:")) return new StringReader(expression);
    String fileName = extractFileName(expression);
    File file = new File(fileName);
    if (!file.isAbsolute() && file.getPath().contains("/"))
        throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
                                           "' in subdirectory, which is not supported.");
    return search.getRankingExpression(fileName);
}
/** Shallow clones this, copying each mutable collection so the clone can be mutated independently */
@Override
public RankProfile clone() {
    try {
        RankProfile clone = (RankProfile)super.clone();
        clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
        clone.matchPhaseSettings = this.matchPhaseSettings;
        clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
        clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
        clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
        clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
        clone.functions = new LinkedHashMap<>(this.functions);
        clone.allFunctionsCached = null;
        clone.filterFields = new HashSet<>(this.filterFields);
        clone.constants = new HashMap<>(this.constants);
        return clone;
    }
    catch (CloneNotSupportedException e) {
        throw new RuntimeException("Won't happen", e);
    }
}
/**
 * Returns a copy of this where the content is optimized for execution.
 * Compiled profiles should never be modified.
 */
public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
    try {
        RankProfile compiled = this.clone();
        compiled.compileThis(queryProfiles, importedModels);
        return compiled;
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
    }
}
/** Compiles in place: transforms first/second phase expressions and all functions. Order matters. */
private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
    checkNameCollisions(getFunctions(), getConstants());
    ExpressionTransforms expressionTransforms = new ExpressionTransforms();
    Map<Reference, TensorType> featureTypes = collectFeatureTypes();
    // Inline functions must be compiled first so they can be substituted into the others
    Map<String, RankingExpressionFunction> inlineFunctions =
    compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);
    firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
    secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
    functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
    allFunctionsCached = null;
}
/** Throws if any function name is also used as a constant name. */
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
    for (String functionName : functions.keySet()) {
        if (constants.containsKey(functionName))
            throw new IllegalArgumentException("Cannot have both a constant and function named '" +
                                               functionName + "'");
    }
}
/** Returns the subset of all functions which are marked inline. */
private Map<String, RankingExpressionFunction> getInlineFunctions() {
    Map<String, RankingExpressionFunction> inlineFunctions = new HashMap<>();
    for (Map.Entry<String, RankingExpressionFunction> entry : getFunctions().entrySet()) {
        if (entry.getValue().inline())
            inlineFunctions.put(entry.getKey(), entry.getValue());
    }
    return inlineFunctions;
}
/**
 * Compiles all supplied functions. The supplier is re-read each iteration because
 * compiling a function may add rank properties and thereby change the function set.
 */
private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
                                                                QueryProfileRegistry queryProfiles,
                                                                Map<Reference, TensorType> featureTypes,
                                                                ImportedMlModels importedModels,
                                                                Map<String, RankingExpressionFunction> inlineFunctions,
                                                                ExpressionTransforms expressionTransforms) {
    Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
    Map.Entry<String, RankingExpressionFunction> entry;
    while (null != (entry = findUncompiledFunction(functions.get(), compiledFunctions.keySet()))) {
        RankingExpressionFunction rankingExpressionFunction = entry.getValue();
        RankingExpressionFunction compiled = compile(rankingExpressionFunction, queryProfiles, featureTypes,
                                                     importedModels, getConstants(), inlineFunctions, expressionTransforms);
        compiledFunctions.put(entry.getKey(), compiled);
    }
    return compiledFunctions;
}
/** Returns the first function not yet compiled, or null when all are done */
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
                                                                                   Set<String> compiledFunctionNames) {
    for (Map.Entry<String, RankingExpressionFunction> entry : functions.entrySet()) {
        if ( ! compiledFunctionNames.contains(entry.getKey()))
            return entry;
    }
    return null;
}
/** Transforms one function's expression; any rank properties produced are added to this profile as a side effect */
private RankingExpressionFunction compile(RankingExpressionFunction function,
                                          QueryProfileRegistry queryProfiles,
                                          Map<Reference, TensorType> featureTypes,
                                          ImportedMlModels importedModels,
                                          Map<String, Value> constants,
                                          Map<String, RankingExpressionFunction> inlineFunctions,
                                          ExpressionTransforms expressionTransforms) {
    if (function == null) return null;
    RankProfileTransformContext context = new RankProfileTransformContext(this,
                                                                          queryProfiles,
                                                                          featureTypes,
                                                                          importedModels,
                                                                          constants,
                                                                          inlineFunctions);
    RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
    for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
        addRankProperty(rankProperty.getKey(), rankProperty.getValue());
    }
    return function.withExpression(expression);
}
/**
 * Creates a context containing the type information of all constants, attributes and query profiles
 * referable from this rank profile.
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
    return typeContext(queryProfiles, collectFeatureTypes());
}
/** Creates a type context with an empty query profile registry */
public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }
/**
 * Collects the types of all declared input features plus the attribute features of
 * all (regular and imported) fields of the owning search, for expression type resolution.
 */
private Map<Reference, TensorType> collectFeatureTypes() {
    // Copy the declared input features in one step (was an element-wise forEach)
    Map<Reference, TensorType> featureTypes = new HashMap<>(inputFeatures);
    allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    return featureTypes;
}
/**
 * Builds the evaluation type context: all functions, feature types, constants,
 * query-profile-declared query features, and ONNX model output types.
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
    MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
                                                                                  .map(RankingExpressionFunction::function)
                                                                                  .collect(Collectors.toList()),
                                                                    featureTypes);
    // Profile-local constants and owner-level ranking constants
    getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
    rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));
    // Query features declared in query profile types; generalize when declared in several profiles
    for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
        for (FieldDescription field : queryProfileType.declaredFields().values()) {
            TensorType type = field.getType().asTensorType();
            Optional<Reference> feature = Reference.simple(field.getName());
            if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;
            TensorType existingType = context.getType(feature.get());
            if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
                type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
                    new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
                                                 " with type " + field.getType().asTensorType() +
                                                 ", but this is already defined in another query profile with type " +
                                                 context.getType(feature.get())));
            context.setType(feature.get(), type);
        }
    }
    // ONNX model outputs: the default output plus each mapped output
    for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
        String modelName = entry.getKey();
        OnnxModel model = entry.getValue();
        Arguments args = new Arguments(new ReferenceNode(modelName));
        Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);
        TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
        context.setType(new Reference("onnxModel", args, null), defaultOutputType);
        for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
            TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
            context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
        }
    }
    return context;
}
/** Resolves the type of each ONNX model input that can be determined from the context */
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
    Map<String, TensorType> inputTypes = new HashMap<>();
    for (String onnxInputName : model.getInputMap().keySet()) {
        resolveOnnxInputType(onnxInputName, model, context).ifPresent(type -> inputTypes.put(onnxInputName, type));
    }
    return inputTypes;
}
/**
 * Resolves one ONNX input's type from its source: a simple feature reference is looked up
 * directly; a rankingExpression(...) reference or plain name is resolved as a function body type.
 */
private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
    String source = model.getInputMap().get(onnxInputName);
    if (source != null) {
        Optional<Reference> reference = Reference.simple(source);
        if (reference.isPresent()) {
            if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
                source = reference.get().simpleArgument().get();
            } else {
                return Optional.of(context.getType(reference.get()));
            }
        }
        ExpressionFunction func = context.getFunction(source);
        if (func != null) {
            return Optional.of(func.getBody().type(context));
        }
    }
    return Optional.empty();
}
/**
 * Registers an attribute(...) feature type for each attribute of the field.
 * The field's primary attribute is registered under the field name rather than the attribute name.
 */
private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
    Attribute primaryAttribute = field.getAttribute();
    field.getAttributes().forEach((attributeName, attribute) -> {
        String featureName = (attribute == primaryAttribute) ? field.getName() : attributeName;
        featureTypes.put(FeatureNames.asAttributeFeature(featureName),
                         attribute.tensorType().orElse(TensorType.empty));
    });
}
/**
 * A rank setting. The identity of a rank setting is its field name and type (not value).
 * A rank setting is immutable.
 */
public static class RankSetting implements Serializable {
    private final String fieldName;
    private final Type type;
    /** The rank value */
    private final Object value;
    /** The kind of setting; carries the config name and whether it applies to a whole index */
    public enum Type {
        RANKTYPE("rank-type"),
        LITERALBOOST("literal-boost"),
        WEIGHT("weight"),
        PREFERBITVECTOR("preferbitvector",true);
        private final String name;
        /** True if this setting really pertains to an index, not a field within an index */
        private final boolean isIndexLevel;
        Type(String name) {
            this(name,false);
        }
        Type(String name,boolean isIndexLevel) {
            this.name = name;
            this.isIndexLevel=isIndexLevel;
        }
        /** True if this setting really pertains to an index, not a field within an index */
        public boolean isIndexLevel() { return isIndexLevel; }
        /** Returns the name of this type */
        public String getName() {
            return name;
        }
        public String toString() {
            return "type: " + name;
        }
    }
    public RankSetting(String fieldName, RankSetting.Type type, Object value) {
        this.fieldName = fieldName;
        this.type = type;
        this.value = value;
    }
    public String getFieldName() { return fieldName; }
    public Type getType() { return type; }
    public Object getValue() { return value; }
    /** Returns the value as an int, or a negative value if it is not an integer */
    public int getIntValue() {
        if (value instanceof Integer) {
            return ((Integer)value);
        }
        else {
            return -1;
        }
    }
    @Override
    public int hashCode() {
        // Consistent with equals: both use only fieldName and type, never value
        return fieldName.hashCode() + 17 * type.hashCode();
    }
    @Override
    public boolean equals(Object object) {
        if (!(object instanceof RankSetting)) {
            return false;
        }
        RankSetting other = (RankSetting)object;
        return
        fieldName.equals(other.fieldName) &&
        type.equals(other.type);
    }
    @Override
    public String toString() {
        return type + " setting " + fieldName + ": " + value;
    }
}
/** A rank property. Rank properties are Value Objects */
public static class RankProperty implements Serializable {

    private final String name;
    private final String value;

    public RankProperty(String name, String value) {
        this.name = name;
        this.value = value;
    }

    public String getName() { return name; }

    public String getValue() { return value; }

    @Override
    public int hashCode() {
        // Combines both components; same formula as equals' field set
        return name.hashCode() + 17 * value.hashCode();
    }

    @Override
    public boolean equals(Object object) {
        if ( ! (object instanceof RankProperty)) return false;
        RankProperty other = (RankProperty) object;
        return name.equals(other.name) && value.equals(other.value);
    }

    @Override
    public String toString() {
        return name + " = " + value;
    }

}
/** A function in a rank profile */
public static class RankingExpressionFunction {

    // Mutable: setReturnType replaces this with a copy carrying the resolved return type
    private ExpressionFunction function;

    /** True if this should be inlined into calling expressions. Useful for very cheap functions. */
    private final boolean inline;

    RankingExpressionFunction(ExpressionFunction function, boolean inline) {
        this.function = function;
        this.inline = inline;
    }

    /** Replaces the wrapped function with a copy carrying the given return type. */
    public void setReturnType(TensorType type) {
        this.function = function.withReturnType(type);
    }

    public ExpressionFunction function() { return function; }

    /** Returns whether to inline: requested AND the function takes no arguments. */
    public boolean inline() {
        return inline && function.arguments().isEmpty();
    }

    /** Returns a copy of this with the function body replaced by the given expression. */
    RankingExpressionFunction withExpression(RankingExpression expression) {
        return new RankingExpressionFunction(function.withBody(expression), inline);
    }

    @Override
    public String toString() {
        return "function " + function;
    }

}
/** Diversity settings used within match-phase: groups results by an attribute. */
public static final class DiversitySettings {

    private String attribute = null;
    private int minGroups = 0;
    private double cutoffFactor = 10;
    private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;

    public void setAttribute(String attributeName) { this.attribute = attributeName; }
    public void setMinGroups(int groups) { this.minGroups = groups; }
    public void setCutoffFactor(double factor) { this.cutoffFactor = factor; }
    public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { this.cutoffStrategy = strategy; }

    public String getAttribute() { return attribute; }
    public int getMinGroups() { return minGroups; }
    public double getCutoffFactor() { return cutoffFactor; }
    public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }

    /** Throws IllegalArgumentException unless all required settings are present and sane. */
    void checkValid() {
        boolean attributeMissing = (attribute == null) || attribute.isEmpty();
        if (attributeMissing)
            throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
        if (minGroups <= 0)
            throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
        if (cutoffFactor < 1.0)
            throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
    }

}
/** Settings for the match phase (hit limiting / early termination) of a rank profile. */
public static class MatchPhaseSettings {

    private String attribute = null;          // attribute used to limit/order candidate hits; required
    private boolean ascending = false;        // sort direction over the attribute
    private int maxHits = 0;                  // required to be > 0 (see checkValid)
    private double maxFilterCoverage = 0.2;
    private DiversitySettings diversity = null;
    private double evaluationPoint = 0.20;
    private double prePostFilterTippingPoint = 1.0;

    /** Sets diversity settings; validates them eagerly. */
    public void setDiversity(DiversitySettings value) {
        value.checkValid();
        diversity = value;
    }

    public void setAscending(boolean value) { ascending = value; }
    public void setAttribute(String value) { attribute = value; }
    public void setMaxHits(int value) { maxHits = value; }
    public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
    public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
    public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }

    public boolean getAscending() { return ascending; }
    public String getAttribute() { return attribute; }
    public int getMaxHits() { return maxHits; }
    public double getMaxFilterCoverage() { return maxFilterCoverage; }
    public DiversitySettings getDiversity() { return diversity; }
    public double getEvaluationPoint() { return evaluationPoint; }
    public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }

    /** Throws IllegalArgumentException unless the mandatory attribute and max-hits are set. */
    public void checkValid() {
        if (attribute == null) {
            throw new IllegalArgumentException("match-phase did not set any attribute");
        }
        if (! (maxHits > 0)) {
            throw new IllegalArgumentException("match-phase did not set max-hits > 0");
        }
    }

}
/** A simple mutable name → type-string mapping (used for attribute and query feature types). */
public static class TypeSettings {

    private final Map<String, String> types = new HashMap<>();

    void addType(String name, String type) {
        types.put(name, type);
    }

    /** Returns an unmodifiable VIEW of the mappings (reflects later additions). */
    public Map<String, String> getTypes() {
        return Collections.unmodifiableMap(types);
    }

}
} | class RankProfile implements Cloneable {
private final static Logger log = Logger.getLogger(RankProfile.class.getName());

public final static String FIRST_PHASE = "firstphase";
public final static String SECOND_PHASE = "secondphase";

/** The search definition-unique name of this rank profile */
private final String name;

/** The search definition owning this profile, or null if global (owned by a model) */
private final ImmutableSearch search;

/** The model owning this profile if it is global, or null if it is owned by a search definition */
private final VespaModel model;

/** The name of the rank profile inherited by this */
private String inheritedName = null;

/** The resolved inherited profile; lazily resolved and cached by getInherited() */
private RankProfile inherited = null;

/** The match settings of this profile */
private MatchPhaseSettings matchPhaseSettings = null;

/** The rank settings of this profile */
protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();

/** The ranking expression to be used for first phase */
private RankingExpressionFunction firstPhaseRanking = null;

/** The ranking expression to be used for second phase */
private RankingExpressionFunction secondPhaseRanking = null;

/** Number of hits to be reranked in second phase, -1 means use default */
private int keepRankCount = -1; // NOTE(review): exact backend semantics not evident from this file — confirm
private int rerankCount = -1;

// -1 / null mean "unset here": getters fall back to the inherited profile's value
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
private Double termwiseLimit = null;

/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;

/** Summary features declared here, or null if none declared (inherited ones then apply) */
private Set<ReferenceNode> summaryFeatures;

/** Name of the parent profile whose summary features are additionally included, or null */
private String inheritedSummaryFeatures;

private Set<ReferenceNode> rankFeatures;

/** The properties of this - a multimap */
private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();

private Boolean ignoreDefaultRankFeatures = null;

/** Functions declared in this profile (not including inherited ones) */
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();

/** Cache of this + inherited functions; invalidated whenever functions change */
private Map<String, RankingExpressionFunction> allFunctionsCached = null;

private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();

private Set<String> filterFields = new HashSet<>();

private final RankProfileRegistry rankProfileRegistry;

/** Constants in ranking expressions */
private Map<String, Value> constants = new HashMap<>();

private final TypeSettings attributeTypes = new TypeSettings();
private final TypeSettings queryFeatureTypes = new TypeSettings();

/** Lazily computed snapshot of all fields of the owning search (see allFields()) */
private List<ImmutableSDField> allFieldsList;

/** Global onnx models not tied to a search definition */
private final OnnxModels onnxModels;
/**
 * Creates a new rank profile for a particular search definition
 *
 * @param name the name of the new profile
 * @param search the search definition owning this profile
 * @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} to use for storing
 *        and looking up rank profiles.
 */
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
    this.name = Objects.requireNonNull(name, "name cannot be null");
    this.search = Objects.requireNonNull(search, "search cannot be null");
    this.model = null;       // search-owned profiles have no owning model
    this.onnxModels = null;  // onnx models come from the search, see onnxModels()
    this.rankProfileRegistry = rankProfileRegistry;
}

/**
 * Creates a global rank profile (owned by a model rather than a search definition)
 *
 * @param name the name of the new profile
 * @param model the model owning this profile
 */
public RankProfile(String name, VespaModel model, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
    this.name = Objects.requireNonNull(name, "name cannot be null");
    this.search = null;      // global profiles have no owning search
    this.model = Objects.requireNonNull(model, "model cannot be null");
    this.rankProfileRegistry = rankProfileRegistry;
    this.onnxModels = onnxModels;
}
public String getName() { return name; }

/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }

/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
    return search != null ? search.applicationPackage() : model.applicationPackage();
}

/** Returns the ranking constants of the owner of this (search if owned, model if global) */
public RankingConstants rankingConstants() {
    return search != null ? search.rankingConstants() : model.rankingConstants();
}

/** Returns the onnx models visible to this: the search's if owned, the globals otherwise */
public Map<String, OnnxModel> onnxModels() {
    return search != null ? search.onnxModels().asMap() : onnxModels.asMap();
}

/** Streams all fields of the owning search; empty for global profiles. Result list is cached. */
private Stream<ImmutableSDField> allFields() {
    if (search == null) return Stream.empty();
    if (allFieldsList == null) {
        allFieldsList = search.allFieldsList();
    }
    return allFieldsList.stream();
}

/** Streams all imported fields of the owning search; empty for global profiles. */
private Stream<ImmutableSDField> allImportedFields() {
    return search != null ? search.allImportedFields() : Stream.empty();
}
/**
 * Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
 * definition
 */
public void setInherited(String inheritedName) {
    // NOTE(review): does not reset the cached 'inherited' field — confirm this is never
    // called after getInherited() has already resolved the previous name
    this.inheritedName = inheritedName;
}

/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }
/**
 * Returns the inherited rank profile, or null if there is none.
 * Resolution is lazy and cached; a cycle in the inheritance chain throws IllegalArgumentException.
 */
private RankProfile getInherited() {
    if (inheritedName == null) return null;
    if (inherited == null) {
        inherited = resolveInherited();
        if (inherited == null) {
            String msg = "rank-profile '" + getName() + "' inherits '" + inheritedName +
                         "', but it does not exist anywhere in the inheritance of search '" +
                         ((getSearch() != null) ? getSearch().getName() : " global rank profiles") + "'.";
            // Bug fix: 'search' is null for global profiles (second constructor), so guard
            // before consulting its deploy properties to avoid an NPE on this error path.
            if (search != null && search.getDeployProperties().featureFlags().enforceRankProfileInheritance()) {
                throw new IllegalArgumentException(msg);
            } else {
                log.warning(msg);
            }
        } else {
            List<String> children = new ArrayList<>();
            children.add(createFullyQualifiedName());
            verifyNoInheritanceCycle(children, inherited);
        }
    }
    return inherited;
}
/** Returns "searchName.profileName", or just the profile name for a global profile. */
private String createFullyQualifiedName() {
    if (search == null) return getName();
    return search.getName() + "." + getName();
}

/**
 * Walks the inheritance chain starting at the given parent, recording each fully
 * qualified name in 'children', and fails if this profile (children's first entry)
 * is reached again.
 */
private void verifyNoInheritanceCycle(List<String> children, RankProfile parent) {
    String root = children.get(0);
    for (RankProfile ancestor = parent; ancestor != null; ancestor = ancestor.getInherited()) {
        children.add(ancestor.createFullyQualifiedName());
        if (root.equals(ancestor.createFullyQualifiedName()))
            throw new IllegalArgumentException("There is a cycle in the inheritance for rank-profile '" + root + "' = " + children);
    }
}
/**
 * Resolves the inherited profile within the given search.
 * When a profile "overrides" one of the same name, resolution first looks in the
 * document type's inherited types so the parent's profile is found, not this one.
 */
private RankProfile resolveInherited(ImmutableSearch search) {
    SDDocumentType documentType = search.getDocument();
    if (documentType != null) {
        if (name.equals(inheritedName)) {
            // Inheriting our own name: the parent must come from an inherited document type
            for (SDDocumentType baseType : documentType.getInheritedTypes()) {
                RankProfile resolvedFromBase = rankProfileRegistry.resolve(baseType, inheritedName);
                if (resolvedFromBase != null) return resolvedFromBase;
            }
        }
        return rankProfileRegistry.resolve(documentType, inheritedName);
    }
    return rankProfileRegistry.get(search.getName(), inheritedName);
}
/**
 * Returns whether this profile inherits (directly or indirectly) the given profile
 *
 * @param name the profile name to compare this to.
 * @return whether or not this inherits from the named profile.
 */
public boolean inherits(String name) {
    for (RankProfile ancestor = getInherited(); ancestor != null; ancestor = ancestor.getInherited()) {
        if (ancestor.getName().equals(name)) return true;
    }
    return false;
}
/** Sets the match phase settings of this; validates them eagerly. */
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
    settings.checkValid();
    this.matchPhaseSettings = settings;
}

/** Returns the match phase settings set here, or the inherited ones, or null. */
public MatchPhaseSettings getMatchPhaseSettings() {
    MatchPhaseSettings settings = this.matchPhaseSettings;
    if (settings != null) return settings;
    if (getInherited() != null) return getInherited().getMatchPhaseSettings();
    return null;
}

public void addRankSetting(RankSetting rankSetting) {
    rankSettings.add(rankSetting);
}

public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
    addRankSetting(new RankSetting(fieldName, type, value));
}
/**
 * Returns the rank setting of a field declared directly in this profile,
 * or null if there is no such rank setting in this profile.
 *
 * @param field the field whose settings to return.
 * @param type the type that the field is required to be.
 * @return the rank setting found, or null.
 */
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
    for (RankSetting setting : rankSettings) {
        if (setting.getFieldName().equals(field) && setting.getType().equals(type))
            return setting;
    }
    return null;
}

/**
 * Returns a rank setting of field or index, or null if there is no such rank setting in this profile or one it
 * inherits
 *
 * @param field the field whose settings to return
 * @param type the type that the field is required to be
 * @return the rank setting found, or null
 */
public RankSetting getRankSetting(String field, RankSetting.Type type) {
    RankSetting declared = getDeclaredRankSetting(field, type);
    if (declared != null) return declared;
    return (getInherited() != null) ? getInherited().getRankSetting(field, type) : null;
}
/**
 * Returns the rank settings in this rank profile
 *
 * @return an iterator for the declared rank setting
 */
public Iterator<RankSetting> declaredRankSettingIterator() {
    return Collections.unmodifiableSet(rankSettings).iterator();
}

/**
 * Returns all settings in this profile or any profile it inherits
 *
 * @return an iterator for all rank settings of this
 */
public Iterator<RankSetting> rankSettingIterator() {
    return rankSettings().iterator();
}

/**
 * Returns a snapshot of the rank settings of this and everything it inherits.
 * Changes to the returned set will not be reflected in this rank profile.
 * Settings declared here take precedence over inherited ones (added first,
 * and RankSetting equality ignores the value).
 */
public Set<RankSetting> rankSettings() {
    Set<RankSetting> allSettings = new LinkedHashSet<>(rankSettings);
    RankProfile parent = getInherited();
    if (parent != null)
        allSettings.addAll(parent.rankSettings());
    return allSettings;
}
/**
 * Adds a (frozen) constant usable in ranking expressions of this profile.
 * Tensor constants must have fully sized indexed dimensions.
 */
public void addConstant(String name, Value value) {
    if (value instanceof TensorValue) {
        TensorType type = value.type();
        boolean hasUnsizedIndexedDimension =
                type.dimensions().stream().anyMatch(dimension -> dimension.isIndexed() && dimension.size().isEmpty());
        if (hasUnsizedIndexedDimension)
            throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
                                               ": Dense tensor dimensions must have a size");
    }
    constants.put(name, value.freeze());
}

/** Convenience overload of addConstant for tensor values. */
public void addConstantTensor(String name, TensorValue value) {
    addConstant(name, value);
}
/**
 * Returns an unmodifiable view of the constants available in this.
 * Constants declared here shadow inherited ones of the same name.
 */
public Map<String, Value> getConstants() {
    if (constants.isEmpty())
        return getInherited() != null ? getInherited().getConstants() : Collections.emptyMap();
    if (getInherited() == null || getInherited().getConstants().isEmpty())
        return Collections.unmodifiableMap(constants);
    Map<String, Value> combinedConstants = new HashMap<>(getInherited().getConstants());
    combinedConstants.putAll(constants);
    return combinedConstants;
}
/** Declares the type (as a type spec string) of an attribute used by this profile. */
public void addAttributeType(String attributeName, String attributeType) {
    attributeTypes.addType(attributeName, attributeType);
}

public Map<String, String> getAttributeTypes() {
    return attributeTypes.getTypes();
}

/** Declares the type (as a type spec string) of a query feature used by this profile. */
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
    queryFeatureTypes.addType(queryFeature, queryFeatureType);
}

public Map<String, String> getQueryFeatureTypes() {
    return queryFeatureTypes.getTypes();
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getFirstPhaseRanking() {
    RankingExpressionFunction function = getFirstPhase();
    if (function == null) return null;
    // Consistency fix: use the function() accessor like getSecondPhaseRanking does,
    // instead of reaching into the field directly
    return function.function().getBody();
}

/** Returns the first phase function set here or inherited, or null if none. */
public RankingExpressionFunction getFirstPhase() {
    if (firstPhaseRanking != null) return firstPhaseRanking;
    RankProfile inherited = getInherited();
    if (inherited != null) return inherited.getFirstPhase();
    return null;
}

/** Sets the first phase expression from an already parsed expression. */
void setFirstPhaseRanking(RankingExpression rankingExpression) {
    this.firstPhaseRanking = new RankingExpressionFunction(new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression), false);
}

/**
 * Sets the first phase expression from source text.
 *
 * @throws IllegalArgumentException if the expression does not parse
 */
public void setFirstPhaseRanking(String expression) {
    try {
        firstPhaseRanking = new RankingExpressionFunction(parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression), false);
    } catch (ParseException e) {
        throw new IllegalArgumentException("Illegal first phase ranking function", e);
    }
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getSecondPhaseRanking() {
    RankingExpressionFunction function = getSecondPhase();
    if (function == null) return null;
    return function.function().getBody();
}

/** Returns the second phase function set here or inherited, or null if none. */
public RankingExpressionFunction getSecondPhase() {
    if (secondPhaseRanking != null) return secondPhaseRanking;
    RankProfile inherited = getInherited();
    if (inherited != null) return inherited.getSecondPhase();
    return null;
}

/**
 * Sets the second phase expression from source text.
 *
 * @throws IllegalArgumentException if the expression does not parse
 */
public void setSecondPhaseRanking(String expression) {
    try {
        secondPhaseRanking = new RankingExpressionFunction(parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression), false);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Illegal second phase ranking function", e);
    }
}
/** Returns a read-only view of the summary features to use in this profile. This is never null */
public Set<ReferenceNode> getSummaryFeatures() {
    if (inheritedSummaryFeatures != null && summaryFeatures != null) {
        // LinkedHashSet (not HashSet) for a deterministic, insertion-ordered feature list,
        // consistent with how summaryFeatures itself is stored: inherited features first
        Set<ReferenceNode> combined = new LinkedHashSet<>();
        combined.addAll(getInherited().getSummaryFeatures());
        combined.addAll(summaryFeatures);
        return Collections.unmodifiableSet(combined);
    }
    if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
    if (getInherited() != null) return getInherited().getSummaryFeatures();
    return Set.of();
}
/** Adds one summary feature, lazily creating the (insertion-ordered) backing set. */
private void addSummaryFeature(ReferenceNode feature) {
    if (summaryFeatures == null)
        summaryFeatures = new LinkedHashSet<>();
    summaryFeatures.add(feature);
}

/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
    for (ReferenceNode feature : features) {
        addSummaryFeature(feature);
    }
}
/**
 * Sets the name this should inherit the summary features of.
 * Without setting this, this will either have the summary features of the parent,
 * or if summary features are set in this, only have the summary features in this.
 * With this set the resulting summary features of this will be the superset of those defined in this and
 * the final (with inheritance included) summary features of the given parent.
 * The profile must be the profile which is directly inherited by this.
 *
 * @throws IllegalArgumentException if the given profile is not this profile's direct parent
 */
public void setInheritedSummaryFeatures(String parentProfile) {
    if ( ! parentProfile.equals(inheritedName))
        // Message fix: typo "attemtping" and unbalanced quotes in the original
        throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
                                           inheritedName + "', but attempting to inherit '" + parentProfile + "'");
    this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
    if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
    if (getInherited() != null) return getInherited().getRankFeatures();
    return Collections.emptySet();
}

/** Adds one rank feature, lazily creating the (insertion-ordered) backing set. */
private void addRankFeature(ReferenceNode feature) {
    if (rankFeatures == null)
        rankFeatures = new LinkedHashSet<>();
    rankFeatures.add(feature);
}

/**
 * Adds the content of the given feature list to the internal list of rank features.
 *
 * @param features The features to add.
 */
public void addRankFeatures(FeatureList features) {
    for (ReferenceNode feature : features) {
        addRankFeature(feature);
    }
}
/** Returns a read only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
    List<RankProperty> flattened = new ArrayList<>();
    getRankPropertyMap().values().forEach(flattened::addAll);
    return Collections.unmodifiableList(flattened);
}

/** Returns a read only map view of the rank properties to use in this profile. This is never null. */
public Map<String, List<RankProperty>> getRankPropertyMap() {
    RankProfile parent = getInherited();
    if (rankProperties.isEmpty() && parent == null) return Collections.emptyMap();
    if (rankProperties.isEmpty()) return parent.getRankPropertyMap();
    if (parent == null) return Collections.unmodifiableMap(rankProperties);
    // Properties declared here shadow inherited ones of the same name
    Map<String, List<RankProperty>> combined = new LinkedHashMap<>(parent.getRankPropertyMap());
    combined.putAll(rankProperties);
    return Collections.unmodifiableMap(combined);
}

public void addRankProperty(String name, String parameter) {
    addRankProperty(new RankProperty(name, parameter));
}

private void addRankProperty(RankProperty rankProperty) {
    List<RankProperty> values = rankProperties.computeIfAbsent(rankProperty.getName(), key -> new ArrayList<>(1));
    values.add(rankProperty);
}
@Override
public String toString() {
    return "rank profile '" + getName() + "'";
}

/** Returns the rerank count set here, else the inherited one, else -1 (use default). */
public int getRerankCount() {
    return (rerankCount < 0 && (getInherited() != null))
            ? getInherited().getRerankCount()
            : rerankCount;
}

/** Returns the thread count set here, else the inherited one, else -1 (unset). */
public int getNumThreadsPerSearch() {
    return (numThreadsPerSearch < 0 && (getInherited() != null))
            ? getInherited().getNumThreadsPerSearch()
            : numThreadsPerSearch;
}

public void setNumThreadsPerSearch(int numThreads) {
    this.numThreadsPerSearch = numThreads;
}

/** Returns the min hits per thread set here, else the inherited one, else -1 (unset). */
public int getMinHitsPerThread() {
    return (minHitsPerThread < 0 && (getInherited() != null))
            ? getInherited().getMinHitsPerThread()
            : minHitsPerThread;
}

public void setMinHitsPerThread(int minHits) {
    this.minHitsPerThread = minHits;
}

public void setNumSearchPartitions(int numSearchPartitions) {
    this.numSearchPartitions = numSearchPartitions;
}

/** Returns the search partition count set here, else the inherited one, else -1 (unset). */
public int getNumSearchPartitions() {
    return (numSearchPartitions < 0 && (getInherited() != null))
            ? getInherited().getNumSearchPartitions()
            : numSearchPartitions;
}

/** Returns the termwise limit set here or inherited, or empty if unset everywhere. */
public OptionalDouble getTermwiseLimit() {
    return ((termwiseLimit == null) && (getInherited() != null))
            ? getInherited().getTermwiseLimit()
            : (termwiseLimit != null) ? OptionalDouble.of(termwiseLimit) : OptionalDouble.empty();
}

public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }

/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
    this.rerankCount = rerankCount;
}

/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
    this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}

/** Returns the flag set here, else the inherited one, else false. */
public boolean getIgnoreDefaultRankFeatures() {
    if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
    return (getInherited() != null) && getInherited().getIgnoreDefaultRankFeatures();
}
/**
 * Adds a function from source text.
 *
 * @throws IllegalArgumentException if the expression does not parse
 */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
    try {
        addFunction(parseRankingExpression(name, arguments, expression), inline);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
    }
}

/** Adds a function and returns it. Replaces any existing function of the same name and invalidates the cache. */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
    RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline);
    functions.put(function.getName(), rankingExpressionFunction);
    allFunctionsCached = null;
    return rankingExpressionFunction;
}
/**
 * Use for rank profiles representing a model evaluation; it will assume
 * that a input is provided with the declared type (for the purpose of
 * type resolving).
 *
 * @throws IllegalArgumentException if the feature was already declared with a different type
 **/
public void addInputFeature(String name, TensorType declaredType) {
    Reference ref = Reference.fromIdentifier(name);
    if (inputFeatures.containsKey(ref)) {
        TensorType hadType = inputFeatures.get(ref);
        if (! declaredType.equals(hadType)) {
            throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
                                               hadType+" -> "+declaredType);
        }
    }
    inputFeatures.put(ref, declaredType);
}

/** Returns the function declared here or inherited with the given name, or null if none. */
public RankingExpressionFunction findFunction(String name) {
    RankingExpressionFunction function = functions.get(name);
    return ((function == null) && (getInherited() != null))
            ? getInherited().findFunction(name)
            : function;
}
/** Returns an unmodifiable snapshot of the functions in this (including inherited), cached. */
public Map<String, RankingExpressionFunction> getFunctions() {
    if (needToUpdateFunctionCache()) {
        allFunctionsCached = gatherAllFunctions();
    }
    return allFunctionsCached;
}

/** Builds the combined function map; functions declared here shadow inherited ones. */
private Map<String, RankingExpressionFunction> gatherAllFunctions() {
    if (functions.isEmpty() && getInherited() == null) return Collections.emptyMap();
    if (functions.isEmpty()) return getInherited().getFunctions();
    if (getInherited() == null) return Collections.unmodifiableMap(new LinkedHashMap<>(functions));
    Map<String, RankingExpressionFunction> allFunctions = new LinkedHashMap<>(getInherited().getFunctions());
    allFunctions.putAll(functions);
    return Collections.unmodifiableMap(allFunctions);
}

/** Returns whether the cache is stale here or anywhere up the inheritance chain. */
private boolean needToUpdateFunctionCache() {
    if (getInherited() != null)
        return (allFunctionsCached == null) || getInherited().needToUpdateFunctionCache();
    return allFunctionsCached == null;
}
/** Returns the keep-rank-count set here, else the inherited one, else -1 (unset). */
public int getKeepRankCount() {
    if (keepRankCount >= 0) return keepRankCount;
    return (getInherited() != null) ? getInherited().getKeepRankCount() : -1;
}

public void setKeepRankCount(int rerankArraySize) {
    this.keepRankCount = rerankArraySize;
}

/** Returns the drop limit set here, else the inherited one, else -Double.MAX_VALUE (unset). */
public double getRankScoreDropLimit() {
    if (rankScoreDropLimit > -Double.MAX_VALUE) return rankScoreDropLimit;
    return (getInherited() != null) ? getInherited().getRankScoreDropLimit() : rankScoreDropLimit;
}

public void setRankScoreDropLimit(double rankScoreDropLimit) {
    this.rankScoreDropLimit = rankScoreDropLimit;
}

/** Returns the mutable set of filter fields declared directly in this profile. */
public Set<String> filterFields() {
    return filterFields;
}
/**
 * Returns all filter fields in this profile and any profile it inherits.
 *
 * @return the set of all filter fields
 */
public Set<String> allFilterFields() {
    Set<String> all = new LinkedHashSet<>();
    RankProfile parent = getInherited();
    if (parent != null)
        all.addAll(parent.allFilterFields());
    all.addAll(filterFields());
    return all;
}
/**
 * Parses an expression (inline text or a "file:" reference) into a function.
 *
 * @throws ParseException if the expression is empty or does not parse
 */
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
    if (expression.trim().isEmpty())
        throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");

    try (Reader rankingExpressionReader = openRankingExpressionReader(name, expression.trim())) {
        return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
    }
    catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
        ParseException exception = new ParseException("Could not parse ranking expression '" + expression.trim() +
                                                      "' in " + getName()+ ", " + name + ".");
        throw (ParseException)exception.initCause(e);
    }
    catch (IOException e) {
        // Bug fix: preserve the cause instead of discarding it
        throw new RuntimeException("IOException parsing ranking expression '" + name + "'", e);
    }
}
/** Strips the "file:" prefix and ensures the canonical rank-expression file suffix. */
private static String extractFileName(String expression) {
    String fileName = expression.substring("file:".length()).trim();
    if ( ! fileName.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX))
        fileName = fileName + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
    return fileName;
}

/**
 * Returns a reader over the expression: the text itself, or, for "file:" expressions,
 * the referenced file in the application package (subdirectories are not supported).
 */
private Reader openRankingExpressionReader(String expName, String expression) {
    if (!expression.startsWith("file:")) return new StringReader(expression);

    String fileName = extractFileName(expression);
    File file = new File(fileName);
    if (!file.isAbsolute() && file.getPath().contains("/"))
        throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
                                           "' in subdirectory, which is not supported.");

    return search.getRankingExpression(fileName);
}
/**
 * Shallow clones this: collection containers are copied so the clone can be
 * mutated independently, but their elements (and all other references) are shared.
 */
@Override
public RankProfile clone() {
    try {
        RankProfile clone = (RankProfile)super.clone();
        clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
        clone.matchPhaseSettings = this.matchPhaseSettings; // hmm?
        clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
        clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
        clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
        clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
        clone.functions = new LinkedHashMap<>(this.functions);
        clone.allFunctionsCached = null; // force the clone to rebuild its own cache
        clone.filterFields = new HashSet<>(this.filterFields);
        clone.constants = new HashMap<>(this.constants);
        return clone;
    }
    catch (CloneNotSupportedException e) {
        throw new RuntimeException("Won't happen", e);
    }
}
/**
 * Returns a copy of this where the content is optimized for execution.
 * Compiled profiles should never be modified.
 *
 * @throws IllegalArgumentException (wrapping the original) if this profile is invalid
 */
public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
    try {
        RankProfile compiled = this.clone();
        compiled.compileThis(queryProfiles, importedModels);
        return compiled;
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
    }
}
/**
 * Compiles this in place: transforms first/second phase expressions and all functions.
 * Inline functions are compiled first so they can be substituted into the rest.
 */
private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
    checkNameCollisions(getFunctions(), getConstants());
    ExpressionTransforms expressionTransforms = new ExpressionTransforms();

    Map<Reference, TensorType> featureTypes = collectFeatureTypes();
    // Function compiling first pass: compile inline functions without resolving other functions
    Map<String, RankingExpressionFunction> inlineFunctions =
            compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);

    firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
    secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);

    // Function compiling second pass: compile all functions and insert previously compiled inline functions
    functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
    allFunctionsCached = null;
}

/** Fails if any function shares a name with a constant (the reference would be ambiguous). */
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
    for (Map.Entry<String, RankingExpressionFunction> functionEntry : functions.entrySet()) {
        if (constants.containsKey(functionEntry.getKey()))
            throw new IllegalArgumentException("Cannot have both a constant and function named '" +
                                               functionEntry.getKey() + "'");
    }
}
/** Returns the subset of functions which should be inlined, preserving declaration order. */
private Map<String, RankingExpressionFunction> getInlineFunctions() {
    // Collect into a LinkedHashMap: plain toMap yields a HashMap, losing the
    // deterministic iteration order this class maintains everywhere else
    return getFunctions().entrySet().stream()
            .filter(entry -> entry.getValue().inline())
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                                      (a, b) -> a, LinkedHashMap::new));
}
/**
 * Compiles functions until a fixed point: compiling one function may register rank
 * properties and alter the set returned by the supplier, so the supplier is re-queried
 * after each compilation instead of being iterated directly.
 */
private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
                                                                QueryProfileRegistry queryProfiles,
                                                                Map<Reference, TensorType> featureTypes,
                                                                ImportedMlModels importedModels,
                                                                Map<String, RankingExpressionFunction> inlineFunctions,
                                                                ExpressionTransforms expressionTransforms) {
    Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
    Map.Entry<String, RankingExpressionFunction> entry;
    // Compile all functions. Why iterate in such a complicated way?
    // Because some functions (imported models adding generated functions) may add
    // more functions during compiling. A straightforward iteration will either miss
    // those functions, or may cause a ConcurrentModificationException
    while (null != (entry = findUncompiledFunction(functions.get(), compiledFunctions.keySet()))) {
        RankingExpressionFunction rankingExpressionFunction = entry.getValue();
        RankingExpressionFunction compiled = compile(rankingExpressionFunction, queryProfiles, featureTypes,
                                                     importedModels, getConstants(), inlineFunctions, expressionTransforms);
        compiledFunctions.put(entry.getKey(), compiled);
    }
    return compiledFunctions;
}

/** Returns the first function not yet present in the compiled set, or null when done. */
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
                                                                                   Set<String> compiledFunctionNames) {
    for (Map.Entry<String, RankingExpressionFunction> entry : functions.entrySet()) {
        if ( ! compiledFunctionNames.contains(entry.getKey()))
            return entry;
    }
    return null;
}
private RankingExpressionFunction compile(RankingExpressionFunction function,
QueryProfileRegistry queryProfiles,
Map<Reference, TensorType> featureTypes,
ImportedMlModels importedModels,
Map<String, Value> constants,
Map<String, RankingExpressionFunction> inlineFunctions,
ExpressionTransforms expressionTransforms) {
if (function == null) return null;
RankProfileTransformContext context = new RankProfileTransformContext(this,
queryProfiles,
featureTypes,
importedModels,
constants,
inlineFunctions);
RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
addRankProperty(rankProperty.getKey(), rankProperty.getValue());
}
return function.withExpression(expression);
}
/**
* Creates a context containing the type information of all constants, attributes and query profiles
* referable from this rank profile.
*/
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
return typeContext(queryProfiles, collectFeatureTypes());
}
public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }
private Map<Reference, TensorType> collectFeatureTypes() {
Map<Reference, TensorType> featureTypes = new HashMap<>();
inputFeatures.forEach((k, v) -> featureTypes.put(k, v));
allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
return featureTypes;
}
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
.map(RankingExpressionFunction::function)
.collect(Collectors.toList()),
featureTypes);
getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));
for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
for (FieldDescription field : queryProfileType.declaredFields().values()) {
TensorType type = field.getType().asTensorType();
Optional<Reference> feature = Reference.simple(field.getName());
if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;
TensorType existingType = context.getType(feature.get());
if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
" with type " + field.getType().asTensorType() +
", but this is already defined in another query profile with type " +
context.getType(feature.get())));
context.setType(feature.get(), type);
}
}
for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
String modelName = entry.getKey();
OnnxModel model = entry.getValue();
Arguments args = new Arguments(new ReferenceNode(modelName));
Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);
TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
context.setType(new Reference("onnxModel", args, null), defaultOutputType);
for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
}
}
return context;
}
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
Map<String, TensorType> inputTypes = new HashMap<>();
for (String onnxInputName : model.getInputMap().keySet()) {
resolveOnnxInputType(onnxInputName, model, context).ifPresent(type -> inputTypes.put(onnxInputName, type));
}
return inputTypes;
}
private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
String source = model.getInputMap().get(onnxInputName);
if (source != null) {
Optional<Reference> reference = Reference.simple(source);
if (reference.isPresent()) {
if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
source = reference.get().simpleArgument().get();
} else {
return Optional.of(context.getType(reference.get()));
}
}
ExpressionFunction func = context.getFunction(source);
if (func != null) {
return Optional.of(func.getBody().type(context));
}
}
return Optional.empty();
}
private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
Attribute attribute = field.getAttribute();
field.getAttributes().forEach((k, a) -> {
String name = k;
if (attribute == a)
name = field.getName();
featureTypes.put(FeatureNames.asAttributeFeature(name),
a.tensorType().orElse(TensorType.empty));
});
}
/**
* A rank setting. The identity of a rank setting is its field name and type (not value).
* A rank setting is immutable.
*/
public static class RankSetting implements Serializable {
private final String fieldName;
private final Type type;
/** The rank value */
private final Object value;
public enum Type {
RANKTYPE("rank-type"),
LITERALBOOST("literal-boost"),
WEIGHT("weight"),
PREFERBITVECTOR("preferbitvector",true);
private final String name;
/** True if this setting really pertains to an index, not a field within an index */
private final boolean isIndexLevel;
Type(String name) {
this(name,false);
}
Type(String name,boolean isIndexLevel) {
this.name = name;
this.isIndexLevel=isIndexLevel;
}
/** True if this setting really pertains to an index, not a field within an index */
public boolean isIndexLevel() { return isIndexLevel; }
/** Returns the name of this type */
public String getName() {
return name;
}
public String toString() {
return "type: " + name;
}
}
public RankSetting(String fieldName, RankSetting.Type type, Object value) {
this.fieldName = fieldName;
this.type = type;
this.value = value;
}
public String getFieldName() { return fieldName; }
public Type getType() { return type; }
public Object getValue() { return value; }
/** Returns the value as an int, or a negative value if it is not an integer */
public int getIntValue() {
if (value instanceof Integer) {
return ((Integer)value);
}
else {
return -1;
}
}
@Override
public int hashCode() {
return fieldName.hashCode() + 17 * type.hashCode();
}
@Override
public boolean equals(Object object) {
if (!(object instanceof RankSetting)) {
return false;
}
RankSetting other = (RankSetting)object;
return
fieldName.equals(other.fieldName) &&
type.equals(other.type);
}
@Override
public String toString() {
return type + " setting " + fieldName + ": " + value;
}
}
/** A rank property. Rank properties are Value Objects */
public static class RankProperty implements Serializable {
private final String name;
private final String value;
public RankProperty(String name, String value) {
this.name = name;
this.value = value;
}
public String getName() { return name; }
public String getValue() { return value; }
@Override
public int hashCode() {
return name.hashCode() + 17 * value.hashCode();
}
@Override
public boolean equals(Object object) {
if (! (object instanceof RankProperty)) return false;
RankProperty other=(RankProperty)object;
return (other.name.equals(this.name) && other.value.equals(this.value));
}
@Override
public String toString() {
return name + " = " + value;
}
}
/** A function in a rank profile */
public static class RankingExpressionFunction {
private ExpressionFunction function;
/** True if this should be inlined into calling expressions. Useful for very cheap functions. */
private final boolean inline;
RankingExpressionFunction(ExpressionFunction function, boolean inline) {
this.function = function;
this.inline = inline;
}
public void setReturnType(TensorType type) {
this.function = function.withReturnType(type);
}
public ExpressionFunction function() { return function; }
public boolean inline() {
return inline && function.arguments().isEmpty();
}
RankingExpressionFunction withExpression(RankingExpression expression) {
return new RankingExpressionFunction(function.withBody(expression), inline);
}
@Override
public String toString() {
return "function " + function;
}
}
public static final class DiversitySettings {
private String attribute = null;
private int minGroups = 0;
private double cutoffFactor = 10;
private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;
public void setAttribute(String value) { attribute = value; }
public void setMinGroups(int value) { minGroups = value; }
public void setCutoffFactor(double value) { cutoffFactor = value; }
public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { cutoffStrategy = strategy; }
public String getAttribute() { return attribute; }
public int getMinGroups() { return minGroups; }
public double getCutoffFactor() { return cutoffFactor; }
public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }
void checkValid() {
if (attribute == null || attribute.isEmpty()) {
throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
}
if (minGroups <= 0) {
throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
}
if (cutoffFactor < 1.0) {
throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
}
}
}
public static class MatchPhaseSettings {
private String attribute = null;
private boolean ascending = false;
private int maxHits = 0;
private double maxFilterCoverage = 0.2;
private DiversitySettings diversity = null;
private double evaluationPoint = 0.20;
private double prePostFilterTippingPoint = 1.0;
public void setDiversity(DiversitySettings value) {
value.checkValid();
diversity = value;
}
public void setAscending(boolean value) { ascending = value; }
public void setAttribute(String value) { attribute = value; }
public void setMaxHits(int value) { maxHits = value; }
public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }
public boolean getAscending() { return ascending; }
public String getAttribute() { return attribute; }
public int getMaxHits() { return maxHits; }
public double getMaxFilterCoverage() { return maxFilterCoverage; }
public DiversitySettings getDiversity() { return diversity; }
public double getEvaluationPoint() { return evaluationPoint; }
public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }
public void checkValid() {
if (attribute == null) {
throw new IllegalArgumentException("match-phase did not set any attribute");
}
if (! (maxHits > 0)) {
throw new IllegalArgumentException("match-phase did not set max-hits > 0");
}
}
}
public static class TypeSettings {
private final Map<String, String> types = new HashMap<>();
void addType(String name, String type) {
types.put(name, type);
}
public Map<String, String> getTypes() {
return Collections.unmodifiableMap(types);
}
}
} |
indentation seems off | public String getStartupCommand() {
if (useNewLogic) {
return "exec $ROOT/sbin/vespa-slobrok -N -p " + getRpcPort() + " -c " + getConfigId();
}
return "exec $ROOT/sbin/vespa-slobrok -p " + getRpcPort() + " -c " + getConfigId();
} | if (useNewLogic) { | public String getStartupCommand() {
if (useNewLogic) {
return "exec $ROOT/sbin/vespa-slobrok -N -p " + getRpcPort() + " -c " + getConfigId();
}
return "exec $ROOT/sbin/vespa-slobrok -p " + getRpcPort() + " -c " + getConfigId();
} | class Slobrok extends AbstractService implements StateserverConfig.Producer {
private static final long serialVersionUID = 1L;
public final static int BASEPORT = 19099;
public final boolean useNewLogic;
@Override
public void getConfig(StateserverConfig.Builder builder) {
builder.httpport(getHealthPort());
}
/**
* @param parent the parent ConfigProducer.
* @param index unique index for all slobroks
*/
public Slobrok(AbstractConfigProducer<?> parent, int index,
ModelContext.FeatureFlags featureFlags)
{
super(parent, "slobrok." + index);
this.useNewLogic = featureFlags.newLocationBrokerLogic();
portsMeta.on(0).tag("rpc").tag("admin").tag("status");
portsMeta.on(1).tag("http").tag("state");
setProp("index", index);
setProp("clustertype", "slobrok");
setProp("clustername", "admin");
}
@Override
public int getWantedPort() {
if (getId() == 1) {
return BASEPORT;
} else {
return 0;
}
}
@Override
public void allocatePorts(int start, PortAllocBridge from) {
if (start == 0) start = BASEPORT;
from.wantPort(start, "rpc");
from.allocatePort("http");
}
/**
* @return The number of ports needed by the slobrok.
*/
public int getPortCount() {
return 2;
}
/**
* @return The port on which this slobrok should respond
*/
private int getRpcPort() {
return getRelativePort(0);
}
/**
* @return The port on which the state server should respond
*/
@Override
public int getHealthPort() {
return getRelativePort(1);
}
/**
* @return The connection spec to this Slobrok
*/
public String getConnectionSpec() {
return "tcp/" + getHostName() + ":" + getRpcPort();
}
} | class Slobrok extends AbstractService implements StateserverConfig.Producer {
private static final long serialVersionUID = 1L;
public final static int BASEPORT = 19099;
public final boolean useNewLogic;
@Override
public void getConfig(StateserverConfig.Builder builder) {
builder.httpport(getHealthPort());
}
/**
* @param parent the parent ConfigProducer.
* @param index unique index for all slobroks
*/
public Slobrok(AbstractConfigProducer<?> parent, int index,
ModelContext.FeatureFlags featureFlags)
{
super(parent, "slobrok." + index);
this.useNewLogic = featureFlags.newLocationBrokerLogic();
portsMeta.on(0).tag("rpc").tag("admin").tag("status");
portsMeta.on(1).tag("http").tag("state");
setProp("index", index);
setProp("clustertype", "slobrok");
setProp("clustername", "admin");
}
@Override
public int getWantedPort() {
if (getId() == 1) {
return BASEPORT;
} else {
return 0;
}
}
@Override
public void allocatePorts(int start, PortAllocBridge from) {
if (start == 0) start = BASEPORT;
from.wantPort(start, "rpc");
from.allocatePort("http");
}
/**
* @return The number of ports needed by the slobrok.
*/
public int getPortCount() {
return 2;
}
/**
* @return The port on which this slobrok should respond
*/
private int getRpcPort() {
return getRelativePort(0);
}
/**
* @return The port on which the state server should respond
*/
@Override
public int getHealthPort() {
return getRelativePort(1);
}
/**
* @return The connection spec to this Slobrok
*/
public String getConnectionSpec() {
return "tcp/" + getHostName() + ":" + getRpcPort();
}
} |
Consider folding the above context.log into this. Yes, you won't get a warning before execution, but you likely don't need it: If this statement isn't reached, you'll get an exception and stacktrace pointing to this code. (and hopefully with sufficient details on configId etc?) | public void produceArtifact(NodeAgentContext context, String configId, UnixPath resultDirectoryInNode) throws IOException {
UnixPath vespaJvmDumper = new UnixPath(context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper"));
context.log(log, Level.INFO,
"Executing '" + vespaJvmDumper + "' with arguments '" + configId + "' and '" + resultDirectoryInNode + "'");
CommandResult result = container.executeCommandInContainerAsRoot(
context, vespaJvmDumper.toString(), configId, resultDirectoryInNode.toString());
context.log(log, Level.INFO,
"vespa-jvm-dumper exited with code '" + result.getExitCode() + "' and output:\n" + result.getOutput());
if (result.getExitCode() > 0) {
throw new IOException("Failed to jvm dump: " + result.getOutput());
}
} | context.log(log, Level.INFO, | public void produceArtifact(NodeAgentContext context, String configId, UnixPath resultDirectoryInNode) throws IOException {
UnixPath vespaJvmDumper = new UnixPath(context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper"));
context.log(log, Level.INFO,
"Executing '" + vespaJvmDumper + "' with arguments '" + configId + "' and '" + resultDirectoryInNode + "'");
CommandResult result = container.executeCommandInContainerAsRoot(
context, vespaJvmDumper.toString(), configId, resultDirectoryInNode.toString());
context.log(log, Level.INFO,
"vespa-jvm-dumper exited with code '" + result.getExitCode() + "' and output:\n" + result.getOutput());
if (result.getExitCode() > 0) {
throw new IOException("Failed to jvm dump: " + result.getOutput());
}
} | class JvmDumpProducer implements ArtifactProducer {
private static final Logger log = Logger.getLogger(JvmDumpProducer.class.getName());
private final ContainerOperations container;
JvmDumpProducer(ContainerOperations container) { this.container = container; }
public static String NAME = "jvm-dump";
@Override public String name() { return NAME; }
@Override
} | class JvmDumpProducer implements ArtifactProducer {
private static final Logger log = Logger.getLogger(JvmDumpProducer.class.getName());
private final ContainerOperations container;
JvmDumpProducer(ContainerOperations container) { this.container = container; }
public static String NAME = "jvm-dump";
@Override public String name() { return NAME; }
@Override
} |
I will do this in future PR. | public void produceArtifact(NodeAgentContext context, String configId, UnixPath resultDirectoryInNode) throws IOException {
UnixPath vespaJvmDumper = new UnixPath(context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper"));
context.log(log, Level.INFO,
"Executing '" + vespaJvmDumper + "' with arguments '" + configId + "' and '" + resultDirectoryInNode + "'");
CommandResult result = container.executeCommandInContainerAsRoot(
context, vespaJvmDumper.toString(), configId, resultDirectoryInNode.toString());
context.log(log, Level.INFO,
"vespa-jvm-dumper exited with code '" + result.getExitCode() + "' and output:\n" + result.getOutput());
if (result.getExitCode() > 0) {
throw new IOException("Failed to jvm dump: " + result.getOutput());
}
} | context.log(log, Level.INFO, | public void produceArtifact(NodeAgentContext context, String configId, UnixPath resultDirectoryInNode) throws IOException {
UnixPath vespaJvmDumper = new UnixPath(context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper"));
context.log(log, Level.INFO,
"Executing '" + vespaJvmDumper + "' with arguments '" + configId + "' and '" + resultDirectoryInNode + "'");
CommandResult result = container.executeCommandInContainerAsRoot(
context, vespaJvmDumper.toString(), configId, resultDirectoryInNode.toString());
context.log(log, Level.INFO,
"vespa-jvm-dumper exited with code '" + result.getExitCode() + "' and output:\n" + result.getOutput());
if (result.getExitCode() > 0) {
throw new IOException("Failed to jvm dump: " + result.getOutput());
}
} | class JvmDumpProducer implements ArtifactProducer {
private static final Logger log = Logger.getLogger(JvmDumpProducer.class.getName());
private final ContainerOperations container;
JvmDumpProducer(ContainerOperations container) { this.container = container; }
public static String NAME = "jvm-dump";
@Override public String name() { return NAME; }
@Override
} | class JvmDumpProducer implements ArtifactProducer {
private static final Logger log = Logger.getLogger(JvmDumpProducer.class.getName());
private final ContainerOperations container;
JvmDumpProducer(ContainerOperations container) { this.container = container; }
public static String NAME = "jvm-dump";
@Override public String name() { return NAME; }
@Override
} |
```suggestion deployLogger.logApplicationPackage(Level.WARNING, msg); ``` To show this as a notification in console, e.g.:  | private RankProfile getInherited() {
if (inheritedName == null) return null;
if (inherited == null) {
inherited = resolveInherited();
if (inherited == null) {
String msg = "rank-profile '" + getName() + "' inherits '" + inheritedName +
"', but it does not exist anywhere in the inheritance of search '" +
((getSearch() != null) ? getSearch().getName() : " global rank profiles") + "'.";
if (search.getDeployProperties().featureFlags().enforceRankProfileInheritance()) {
throw new IllegalArgumentException(msg);
} else {
deployLogger.log(Level.WARNING, msg);
inherited = resolveIndependentOfInheritance();
}
} else {
List<String> children = new ArrayList<>();
children.add(createFullyQualifiedName());
verifyNoInheritanceCycle(children, inherited);
}
}
return inherited;
} | deployLogger.log(Level.WARNING, msg); | private RankProfile getInherited() {
if (inheritedName == null) return null;
if (inherited == null) {
inherited = resolveInherited();
if (inherited == null) {
String msg = "rank-profile '" + getName() + "' inherits '" + inheritedName +
"', but it does not exist anywhere in the inheritance of search '" +
((getSearch() != null) ? getSearch().getName() : " global rank profiles") + "'.";
if (search.getDeployProperties().featureFlags().enforceRankProfileInheritance()) {
throw new IllegalArgumentException(msg);
} else {
deployLogger.log(Level.WARNING, msg);
inherited = resolveIndependentOfInheritance();
}
} else {
List<String> children = new ArrayList<>();
children.add(createFullyQualifiedName());
verifyNoInheritanceCycle(children, inherited);
}
}
return inherited;
} | class RankProfile implements Cloneable {
public final static String FIRST_PHASE = "firstphase";
public final static String SECOND_PHASE = "secondphase";
/** The search definition-unique name of this rank profile */
private final String name;
/** The search definition owning this profile, or null if global (owned by a model) */
private final ImmutableSearch search;
/** The model owning this profile if it is global, or null if it is owned by a search definition */
private final VespaModel model;
/** The name of the rank profile inherited by this */
private String inheritedName = null;
private RankProfile inherited = null;
/** The match settings of this profile */
private MatchPhaseSettings matchPhaseSettings = null;
/** The rank settings of this profile */
protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();
/** The ranking expression to be used for first phase */
private RankingExpressionFunction firstPhaseRanking = null;
/** The ranking expression to be used for second phase */
private RankingExpressionFunction secondPhaseRanking = null;
/** Number of hits to be reranked in second phase, -1 means use default */
private int rerankCount = -1;
/** Mysterious attribute */
private int keepRankCount = -1;
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
private Double termwiseLimit = null;
/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;
private Set<ReferenceNode> summaryFeatures;
private String inheritedSummaryFeatures;
private Set<ReferenceNode> rankFeatures;
/** The properties of this - a multimap */
private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();
private Boolean ignoreDefaultRankFeatures = null;
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();
private Map<String, RankingExpressionFunction> allFunctionsCached = null;
private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();
private Set<String> filterFields = new HashSet<>();
private final RankProfileRegistry rankProfileRegistry;
/** Constants in ranking expressions */
private Map<String, Value> constants = new HashMap<>();
private final TypeSettings attributeTypes = new TypeSettings();
private final TypeSettings queryFeatureTypes = new TypeSettings();
private List<ImmutableSDField> allFieldsList;
/** Global onnx models not tied to a search definition */
private final OnnxModels onnxModels;
private final DeployLogger deployLogger;
/**
* Creates a new rank profile for a particular search definition
*
* @param name the name of the new profile
* @param search the search definition owning this profile
* @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} to use for storing
* and looking up rank profiles.
*/
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = Objects.requireNonNull(search, "search cannot be null");
this.model = null;
this.onnxModels = null;
this.rankProfileRegistry = rankProfileRegistry;
this.deployLogger = search.getDeployLogger();
}
/**
* Creates a global rank profile
*
* @param name the name of the new profile
* @param model the model owning this profile
*/
public RankProfile(String name, VespaModel model, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = null;
this.model = Objects.requireNonNull(model, "model cannot be null");
this.rankProfileRegistry = rankProfileRegistry;
this.onnxModels = onnxModels;
this.deployLogger = deployLogger;
}
public String getName() { return name; }
/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }
/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
return search != null ? search.applicationPackage() : model.applicationPackage();
}
/** Returns the ranking constants of the owner of this */
public RankingConstants rankingConstants() {
return search != null ? search.rankingConstants() : model.rankingConstants();
}
public Map<String, OnnxModel> onnxModels() {
return search != null ? search.onnxModels().asMap() : onnxModels.asMap();
}
private Stream<ImmutableSDField> allFields() {
if (search == null) return Stream.empty();
if (allFieldsList == null) {
allFieldsList = search.allFieldsList();
}
return allFieldsList.stream();
}
private Stream<ImmutableSDField> allImportedFields() {
return search != null ? search.allImportedFields() : Stream.empty();
}
/**
* Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
* definition
*/
public void setInherited(String inheritedName) {
this.inheritedName = inheritedName;
}
/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }
/** Returns the inherited rank profile, or null if there is none */
private RankProfile resolveIndependentOfInheritance() {
for (RankProfile rankProfile : rankProfileRegistry.all()) {
if (rankProfile.getName().equals(inheritedName)) return rankProfile;
}
return null;
}
private String createFullyQualifiedName() {
return (search != null)
? (search.getName() + "." + getName())
: getName();
}
private void verifyNoInheritanceCycle(List<String> children, RankProfile parent) {
children.add(parent.createFullyQualifiedName());
String root = children.get(0);
if (root.equals(parent.createFullyQualifiedName())) {
throw new IllegalArgumentException("There is a cycle in the inheritance for rank-profile '" + root + "' = " + children);
}
if (parent.getInherited() != null) {
verifyNoInheritanceCycle(children, parent.getInherited());
}
}
private RankProfile resolveInherited(ImmutableSearch search) {
SDDocumentType documentType = search.getDocument();
if (documentType != null) {
if (name.equals(inheritedName)) {
for (SDDocumentType baseType : documentType.getInheritedTypes()) {
RankProfile resolvedFromBase = rankProfileRegistry.resolve(baseType, inheritedName);
if (resolvedFromBase != null) return resolvedFromBase;
}
}
return rankProfileRegistry.resolve(documentType, inheritedName);
}
return rankProfileRegistry.get(search.getName(), inheritedName);
}
private RankProfile resolveInherited() {
if (inheritedName == null) return null;
return (getSearch() != null)
? resolveInherited(search)
: rankProfileRegistry.getGlobal(inheritedName);
}
/**
* Returns whether this profile inherits (directly or indirectly) the given profile
*
* @param name the profile name to compare this to.
* @return whether or not this inherits from the named profile.
*/
public boolean inherits(String name) {
RankProfile parent = getInherited();
while (parent != null) {
if (parent.getName().equals(name))
return true;
parent = parent.getInherited();
}
return false;
}
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
settings.checkValid();
this.matchPhaseSettings = settings;
}
public MatchPhaseSettings getMatchPhaseSettings() {
MatchPhaseSettings settings = this.matchPhaseSettings;
if (settings != null) return settings;
if (getInherited() != null) return getInherited().getMatchPhaseSettings();
return null;
}
public void addRankSetting(RankSetting rankSetting) {
rankSettings.add(rankSetting);
}
public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
addRankSetting(new RankSetting(fieldName, type, value));
}
/**
* Returns the a rank setting of a field, or null if there is no such rank setting in this profile
*
* @param field the field whose settings to return.
* @param type the type that the field is required to be.
* @return the rank setting found, or null.
*/
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
for (Iterator<RankSetting> i = declaredRankSettingIterator(); i.hasNext(); ) {
RankSetting setting = i.next();
if (setting.getFieldName().equals(field) &&
setting.getType().equals(type)) {
return setting;
}
}
return null;
}
/**
* Returns a rank setting of field or index, or null if there is no such rank setting in this profile or one it
* inherits
*
* @param field the field whose settings to return
* @param type the type that the field is required to be
* @return the rank setting found, or null
*/
public RankSetting getRankSetting(String field, RankSetting.Type type) {
RankSetting rankSetting = getDeclaredRankSetting(field, type);
if (rankSetting != null) return rankSetting;
if (getInherited() != null) return getInherited().getRankSetting(field, type);
return null;
}
/**
* Returns the rank settings in this rank profile
*
* @return an iterator for the declared rank setting
*/
public Iterator<RankSetting> declaredRankSettingIterator() {
return Collections.unmodifiableSet(rankSettings).iterator();
}
/**
* Returns all settings in this profile or any profile it inherits
*
* @return an iterator for all rank settings of this
*/
public Iterator<RankSetting> rankSettingIterator() {
return rankSettings().iterator();
}
/**
* Returns a snapshot of the rank settings of this and everything it inherits.
* Changes to the returned set will not be reflected in this rank profile.
*/
public Set<RankSetting> rankSettings() {
Set<RankSetting> allSettings = new LinkedHashSet<>(rankSettings);
RankProfile parent = getInherited();
if (parent != null)
allSettings.addAll(parent.rankSettings());
return allSettings;
}
public void addConstant(String name, Value value) {
if (value instanceof TensorValue) {
TensorType type = value.type();
if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
": Dense tensor dimensions must have a size");
}
constants.put(name, value.freeze());
}
public void addConstantTensor(String name, TensorValue value) {
addConstant(name, value);
}
/** Returns an unmodifiable view of the constants available in this */
public Map<String, Value> getConstants() {
if (constants.isEmpty())
return getInherited() != null ? getInherited().getConstants() : Collections.emptyMap();
if (getInherited() == null || getInherited().getConstants().isEmpty())
return Collections.unmodifiableMap(constants);
// Both levels have constants: local constants shadow inherited ones with the same name
Map<String, Value> combinedConstants = new HashMap<>(getInherited().getConstants());
combinedConstants.putAll(constants);
return combinedConstants;
}
/** Declares the type of an attribute, for use in type resolution of ranking expressions */
public void addAttributeType(String attributeName, String attributeType) {
attributeTypes.addType(attributeName, attributeType);
}
/** Returns an unmodifiable map of the declared attribute types, by attribute name */
public Map<String, String> getAttributeTypes() {
return attributeTypes.getTypes();
}
/** Declares the type of a query feature, for use in type resolution of ranking expressions */
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
queryFeatureTypes.addType(queryFeature, queryFeatureType);
}
/** Returns an unmodifiable map of the declared query feature types, by feature name */
public Map<String, String> getQueryFeatureTypes() {
return queryFeatureTypes.getTypes();
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getFirstPhaseRanking() {
RankingExpressionFunction function = getFirstPhase();
if (function == null) return null;
// Use the function() accessor, consistent with getSecondPhaseRanking, instead of direct field access
return function.function().getBody();
}
/** Returns the first-phase ranking function, inherited if not set locally, or null if set nowhere */
public RankingExpressionFunction getFirstPhase() {
if (firstPhaseRanking != null) return firstPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getFirstPhase();
return null;
}
/** Sets the first-phase ranking expression directly, wrapping it as a non-inline function */
void setFirstPhaseRanking(RankingExpression rankingExpression) {
this.firstPhaseRanking = new RankingExpressionFunction(new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression), false);
}
/** Sets the first-phase ranking expression from source text. @throws IllegalArgumentException if it does not parse */
public void setFirstPhaseRanking(String expression) {
try {
firstPhaseRanking = new RankingExpressionFunction(parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression), false);
} catch (ParseException e) {
throw new IllegalArgumentException("Illegal first phase ranking function", e);
}
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getSecondPhaseRanking() {
RankingExpressionFunction function = getSecondPhase();
if (function == null) return null;
return function.function().getBody();
}
/** Returns the second-phase ranking function, inherited if not set locally, or null if set nowhere */
public RankingExpressionFunction getSecondPhase() {
if (secondPhaseRanking != null) return secondPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getSecondPhase();
return null;
}
/** Sets the second-phase ranking expression from source text. @throws IllegalArgumentException if it does not parse */
public void setSecondPhaseRanking(String expression) {
try {
secondPhaseRanking = new RankingExpressionFunction(parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression), false);
}
catch (ParseException e) {
throw new IllegalArgumentException("Illegal second phase ranking function", e);
}
}
/** Returns a read-only view of the summary features to use in this profile. This is never null */
public Set<ReferenceNode> getSummaryFeatures() {
if (inheritedSummaryFeatures != null && summaryFeatures != null) {
// Use LinkedHashSet for a deterministic order (inherited features first, then local ones),
// consistent with addSummaryFeature which also preserves insertion order
Set<ReferenceNode> combined = new LinkedHashSet<>();
combined.addAll(getInherited().getSummaryFeatures());
combined.addAll(summaryFeatures);
return Collections.unmodifiableSet(combined);
}
if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
if (getInherited() != null) return getInherited().getSummaryFeatures();
return Set.of();
}
/** Adds a single summary feature, lazily creating the (insertion-ordered) backing set */
private void addSummaryFeature(ReferenceNode feature) {
if (summaryFeatures == null)
summaryFeatures = new LinkedHashSet<>();
summaryFeatures.add(feature);
}
/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addSummaryFeature(feature);
}
}
/**
 * Sets the name this should inherit the summary features of.
 * Without setting this, this will either have the summary features of the parent,
 * or if summary features are set in this, only have the summary features in this.
 * With this set the resulting summary features of this will be the superset of those defined in this and
 * the final (with inheritance included) summary features of the given parent.
 * The profile must be the profile which is directly inherited by this.
 *
 * @throws IllegalArgumentException if the given profile is not the directly inherited profile
 */
public void setInheritedSummaryFeatures(String parentProfile) {
if ( ! parentProfile.equals(inheritedName))
// Fixed message: "attemtping" typo and unbalanced quotes
throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
inheritedName + "', but attempting to inherit '" + parentProfile + "'");
this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
if (getInherited() != null) return getInherited().getRankFeatures();
return Collections.emptySet();
}
/** Adds a single rank feature, lazily creating the (insertion-ordered) backing set */
private void addRankFeature(ReferenceNode feature) {
if (rankFeatures == null)
rankFeatures = new LinkedHashSet<>();
rankFeatures.add(feature);
}
/**
 * Adds the content of the given feature list to the internal list of rank features.
 *
 * @param features The features to add.
 */
public void addRankFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addRankFeature(feature);
}
}
/** Returns a read only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
List<RankProperty> properties = new ArrayList<>();
for (List<RankProperty> propertyList : getRankPropertyMap().values()) {
properties.addAll(propertyList);
}
return Collections.unmodifiableList(properties);
}
/** Returns a read only map view of the rank properties to use in this profile. This is never null. */
public Map<String, List<RankProperty>> getRankPropertyMap() {
if (rankProperties.isEmpty() && getInherited() == null) return Collections.emptyMap();
if (rankProperties.isEmpty()) return getInherited().getRankPropertyMap();
if (getInherited() == null) return Collections.unmodifiableMap(rankProperties);
// Both levels have properties: local property lists replace inherited ones with the same name
Map<String, List<RankProperty>> combined = new LinkedHashMap<>(getInherited().getRankPropertyMap());
combined.putAll(rankProperties);
return Collections.unmodifiableMap(combined);
}
/** Adds a rank property with the given name and value */
public void addRankProperty(String name, String parameter) {
addRankProperty(new RankProperty(name, parameter));
}
/** Appends the property to this profile's multimap, creating the value list on first use of the name */
private void addRankProperty(RankProperty rankProperty) {
rankProperties.computeIfAbsent(rankProperty.getName(), (String key) -> new ArrayList<>(1)).add(rankProperty);
}
/** Returns a short description of this profile for logging and error messages */
@Override
public String toString() {
return "rank profile '" + getName() + "'";
}
/** Returns the rerank count, inherited if not set locally (-1 means use default) */
public int getRerankCount() {
return (rerankCount < 0 && (getInherited() != null))
? getInherited().getRerankCount()
: rerankCount;
}
/** Returns the number of threads per search, inherited if not set locally (-1 means use default) */
public int getNumThreadsPerSearch() {
return (numThreadsPerSearch < 0 && (getInherited() != null))
? getInherited().getNumThreadsPerSearch()
: numThreadsPerSearch;
}
/** Sets the number of threads per search. Set to -1 to use inherited/default */
public void setNumThreadsPerSearch(int numThreads) {
this.numThreadsPerSearch = numThreads;
}
/** Returns the min hits per thread, inherited if not set locally (-1 means use default) */
public int getMinHitsPerThread() {
return (minHitsPerThread < 0 && (getInherited() != null))
? getInherited().getMinHitsPerThread()
: minHitsPerThread;
}
/** Sets the min hits per thread. Set to -1 to use inherited/default */
public void setMinHitsPerThread(int minHits) {
this.minHitsPerThread = minHits;
}
/** Sets the number of search partitions. Set to -1 to use inherited/default */
public void setNumSearchPartitions(int numSearchPartitions) {
this.numSearchPartitions = numSearchPartitions;
}
/** Returns the number of search partitions, inherited if not set locally (-1 means use default) */
public int getNumSearchPartitions() {
return (numSearchPartitions < 0 && (getInherited() != null))
? getInherited().getNumSearchPartitions()
: numSearchPartitions;
}
/** Returns the termwise limit, inherited if not set locally; empty if set nowhere */
public OptionalDouble getTermwiseLimit() {
return ((termwiseLimit == null) && (getInherited() != null))
? getInherited().getTermwiseLimit()
: (termwiseLimit != null) ? OptionalDouble.of(termwiseLimit) : OptionalDouble.empty();
}
/** Sets the termwise limit */
public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }
/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
this.rerankCount = rerankCount;
}
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}
/** Returns whether default rank features should be ignored, inherited if not set locally, false if set nowhere */
public boolean getIgnoreDefaultRankFeatures() {
if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
return (getInherited() != null) && getInherited().getIgnoreDefaultRankFeatures();
}
/** Adds a function. @throws IllegalArgumentException if the expression does not parse */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
try {
addFunction(parseRankingExpression(name, arguments, expression), inline);
}
catch (ParseException e) {
throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
}
}
/** Adds a function and returns it */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline);
functions.put(function.getName(), rankingExpressionFunction);
// Invalidate the cached view of all functions (local + inherited)
allFunctionsCached = null;
return rankingExpressionFunction;
}
/**
 * Use for rank profiles representing a model evaluation; it will assume
 * that a input is provided with the declared type (for the purpose of
 * type resolving).
 *
 * @throws IllegalArgumentException if the feature was already declared with a different type
 **/
public void addInputFeature(String name, TensorType declaredType) {
Reference ref = Reference.fromIdentifier(name);
// Single map lookup instead of containsKey followed by get
TensorType hadType = inputFeatures.get(ref);
if (hadType != null && ! declaredType.equals(hadType)) {
throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
hadType+" -> "+declaredType);
}
inputFeatures.put(ref, declaredType);
}
/** Returns the function with the given name, inherited if not declared locally, or null if not found */
public RankingExpressionFunction findFunction(String name) {
RankingExpressionFunction function = functions.get(name);
return ((function == null) && (getInherited() != null))
? getInherited().findFunction(name)
: function;
}
/** Returns an unmodifiable snapshot of the functions in this */
public Map<String, RankingExpressionFunction> getFunctions() {
// Lazily (re)build the combined local + inherited view when it has been invalidated
if (needToUpdateFunctionCache()) {
allFunctionsCached = gatherAllFunctions();
}
return allFunctionsCached;
}
/** Builds the combined map of local and inherited functions; local functions shadow inherited ones */
private Map<String, RankingExpressionFunction> gatherAllFunctions() {
if (functions.isEmpty() && getInherited() == null) return Collections.emptyMap();
if (functions.isEmpty()) return getInherited().getFunctions();
if (getInherited() == null) return Collections.unmodifiableMap(new LinkedHashMap<>(functions));
Map<String, RankingExpressionFunction> allFunctions = new LinkedHashMap<>(getInherited().getFunctions());
allFunctions.putAll(functions);
return Collections.unmodifiableMap(allFunctions);
}
/** True if this or any inherited profile has an invalidated function cache */
private boolean needToUpdateFunctionCache() {
if (getInherited() != null)
return (allFunctionsCached == null) || getInherited().needToUpdateFunctionCache();
return allFunctionsCached == null;
}
/** Returns the keep rank count, inherited if not set locally, or -1 if set nowhere */
public int getKeepRankCount() {
if (keepRankCount >= 0) return keepRankCount;
if (getInherited() != null) return getInherited().getKeepRankCount();
return -1;
}
/** Sets the keep rank count (rerank array size). Set to -1 to use inherited */
public void setKeepRankCount(int rerankArraySize) {
this.keepRankCount = rerankArraySize;
}
/** Returns the rank score drop limit, inherited if not set locally (-Double.MAX_VALUE means unset) */
public double getRankScoreDropLimit() {
// Note: '> -Double.MAX_VALUE' (was written as the confusing '>-') tests whether a limit was explicitly set
if (rankScoreDropLimit > -Double.MAX_VALUE) return rankScoreDropLimit;
if (getInherited() != null) return getInherited().getRankScoreDropLimit();
return rankScoreDropLimit;
}
/** Sets the drop limit: hits with rank score less than or equal to this value are dropped */
public void setRankScoreDropLimit(double rankScoreDropLimit) {
this.rankScoreDropLimit = rankScoreDropLimit;
}
/** Returns the (mutable) set of filter fields declared directly in this profile */
public Set<String> filterFields() {
return filterFields;
}
/**
 * Returns all filter fields in this profile and any profile it inherits.
 *
 * @return the set of all filter fields
 */
public Set<String> allFilterFields() {
RankProfile parent = getInherited();
// Inherited fields first, then local ones, in a deterministic (insertion) order
Set<String> retval = new LinkedHashSet<>();
if (parent != null) {
retval.addAll(parent.allFilterFields());
}
retval.addAll(filterFields());
return retval;
}
/**
 * Parses the given expression source into a function.
 *
 * @throws ParseException if the expression is empty or does not parse
 */
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
if (expression.trim().isEmpty())
throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");
try (Reader rankingExpressionReader = openRankingExpressionReader(name, expression.trim())) {
return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
}
catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
ParseException exception = new ParseException("Could not parse ranking expression '" + expression.trim() +
"' in " + getName()+ ", " + name + ".");
throw (ParseException)exception.initCause(e);
}
catch (IOException e) {
// Preserve the cause: the original code dropped 'e', losing the underlying I/O failure
throw new RuntimeException("IOException parsing ranking expression '" + name + "'", e);
}
}
/** Resolves the file name from a "file:" expression reference, appending the standard suffix if missing. */
private static String extractFileName(String expression) {
String name = expression.substring("file:".length()).trim();
return name.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX)
? name
: name + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
}
/**
 * Opens a reader over the expression source: the text itself, or the referenced file for "file:" expressions.
 * File references in subdirectories are not supported.
 */
private Reader openRankingExpressionReader(String expName, String expression) {
if (!expression.startsWith("file:")) return new StringReader(expression);
String fileName = extractFileName(expression);
File file = new File(fileName);
if (!file.isAbsolute() && file.getPath().contains("/"))
throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
"' in subdirectory, which is not supported.");
return search.getRankingExpression(fileName);
}
/** Shallow clones this */
@Override
public RankProfile clone() {
try {
RankProfile clone = (RankProfile)super.clone();
// Collections are copied so the clone can be mutated independently;
// the contained elements themselves are shared (shallow clone)
clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
clone.matchPhaseSettings = this.matchPhaseSettings;
clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
clone.functions = new LinkedHashMap<>(this.functions);
clone.allFunctionsCached = null;
clone.filterFields = new HashSet<>(this.filterFields);
clone.constants = new HashMap<>(this.constants);
return clone;
}
catch (CloneNotSupportedException e) {
throw new RuntimeException("Won't happen", e);
}
}
/**
 * Returns a copy of this where the content is optimized for execution.
 * Compiled profiles should never be modified.
 */
public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
try {
// Compile a clone so this (uncompiled) profile remains usable
RankProfile compiled = this.clone();
compiled.compileThis(queryProfiles, importedModels);
return compiled;
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
}
}
/**
 * Compiles this in place: inline functions are compiled first, then the phase expressions and
 * remaining functions are transformed with them available. Order matters here.
 */
private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
checkNameCollisions(getFunctions(), getConstants());
ExpressionTransforms expressionTransforms = new ExpressionTransforms();
Map<Reference, TensorType> featureTypes = collectFeatureTypes();
Map<String, RankingExpressionFunction> inlineFunctions =
compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);
firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
// Invalidate the cached combined function view since 'functions' was replaced
allFunctionsCached = null;
}
/** Throws if any name is used for both a function and a constant, since references would be ambiguous. */
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
for (String functionName : functions.keySet()) {
if (constants.containsKey(functionName))
throw new IllegalArgumentException("Cannot have both a constant and function named '" +
functionName + "'");
}
}
/** Returns the subset of all functions which are marked inline (and eligible for inlining) */
private Map<String, RankingExpressionFunction> getInlineFunctions() {
return getFunctions().entrySet().stream().filter(x -> x.getValue().inline())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
/**
 * Compiles all functions supplied by the given supplier, one at a time.
 * The supplier is re-queried each iteration because compiling a function may add rank properties
 * (and thereby change state); compilation continues until no uncompiled function remains.
 */
private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
QueryProfileRegistry queryProfiles,
Map<Reference, TensorType> featureTypes,
ImportedMlModels importedModels,
Map<String, RankingExpressionFunction> inlineFunctions,
ExpressionTransforms expressionTransforms) {
Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
Map.Entry<String, RankingExpressionFunction> entry;
while (null != (entry = findUncompiledFunction(functions.get(), compiledFunctions.keySet()))) {
RankingExpressionFunction rankingExpressionFunction = entry.getValue();
RankingExpressionFunction compiled = compile(rankingExpressionFunction, queryProfiles, featureTypes,
importedModels, getConstants(), inlineFunctions, expressionTransforms);
compiledFunctions.put(entry.getKey(), compiled);
}
return compiledFunctions;
}
/** Returns some function not yet compiled, or null if all functions are compiled. */
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
                                                                                   Set<String> compiledFunctionNames) {
return functions.entrySet().stream()
                .filter(entry -> ! compiledFunctionNames.contains(entry.getKey()))
                .findFirst()
                .orElse(null);
}
/**
 * Compiles a single function by running all expression transforms over its body.
 * Rank properties produced as a side effect of the transforms are added to this profile.
 * Returns null if the given function is null.
 */
private RankingExpressionFunction compile(RankingExpressionFunction function,
QueryProfileRegistry queryProfiles,
Map<Reference, TensorType> featureTypes,
ImportedMlModels importedModels,
Map<String, Value> constants,
Map<String, RankingExpressionFunction> inlineFunctions,
ExpressionTransforms expressionTransforms) {
if (function == null) return null;
RankProfileTransformContext context = new RankProfileTransformContext(this,
queryProfiles,
featureTypes,
importedModels,
constants,
inlineFunctions);
RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
// Transforms may emit rank properties (e.g. for external models); record them on this profile
for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
addRankProperty(rankProperty.getKey(), rankProperty.getValue());
}
return function.withExpression(expression);
}
/**
 * Creates a context containing the type information of all constants, attributes and query profiles
 * referable from this rank profile.
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
return typeContext(queryProfiles, collectFeatureTypes());
}
/** Creates a type context with an empty query profile registry */
public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }
/** Collects the types of all declared input features and of all (regular and imported) attribute fields */
private Map<Reference, TensorType> collectFeatureTypes() {
// putAll replaces the equivalent but noisier forEach((k, v) -> put(k, v))
Map<Reference, TensorType> featureTypes = new HashMap<>(inputFeatures);
allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
return featureTypes;
}
/**
 * Builds the evaluation type context: functions, feature types, constant types,
 * query feature types from query profiles, and onnx model output types.
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
.map(RankingExpressionFunction::function)
.collect(Collectors.toList()),
featureTypes);
// Both profile-local constants and the owner's ranking constants become constant features
getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));
for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
for (FieldDescription field : queryProfileType.declaredFields().values()) {
TensorType type = field.getType().asTensorType();
// Only 'query(...)' features are taken from query profile types
Optional<Reference> feature = Reference.simple(field.getName());
if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;
TensorType existingType = context.getType(feature.get());
// If another query profile already declared this feature, the types must be reconcilable
if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
" with type " + field.getType().asTensorType() +
", but this is already defined in another query profile with type " +
context.getType(feature.get())));
context.setType(feature.get(), type);
}
}
for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
String modelName = entry.getKey();
OnnxModel model = entry.getValue();
Arguments args = new Arguments(new ReferenceNode(modelName));
Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);
// Register both the default output and each mapped named output of the model
TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
context.setType(new Reference("onnxModel", args, null), defaultOutputType);
for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
}
}
return context;
}
/** Resolves the tensor type of each onnx model input that can be determined from the context */
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
Map<String, TensorType> inputTypes = new HashMap<>();
for (String onnxInputName : model.getInputMap().keySet()) {
resolveOnnxInputType(onnxInputName, model, context).ifPresent(type -> inputTypes.put(onnxInputName, type));
}
return inputTypes;
}
/**
 * Resolves the type of a single onnx input: either directly from its source feature reference,
 * or by resolving the type of the function it maps to. Empty if neither can be determined.
 */
private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
String source = model.getInputMap().get(onnxInputName);
if (source != null) {
Optional<Reference> reference = Reference.simple(source);
if (reference.isPresent()) {
if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
// 'rankingExpression(name)' references a function: unwrap to the function name and fall through
source = reference.get().simpleArgument().get();
} else {
return Optional.of(context.getType(reference.get()));
}
}
ExpressionFunction func = context.getFunction(source);
if (func != null) {
return Optional.of(func.getBody().type(context));
}
}
return Optional.empty();
}
/**
 * Registers an 'attribute(...)' feature type for each attribute of the field.
 * The field's own attribute is registered under the field name rather than the attribute name.
 */
private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
Attribute attribute = field.getAttribute();
field.getAttributes().forEach((k, a) -> {
String name = k;
if (attribute == a)
name = field.getName();
// Non-tensor attributes are registered with the empty tensor type (a double)
featureTypes.put(FeatureNames.asAttributeFeature(name),
a.tensorType().orElse(TensorType.empty));
});
}
/**
 * A rank setting. The identity of a rank setting is its field name and type (not value).
 * A rank setting is immutable.
 */
public static class RankSetting implements Serializable {
private final String fieldName;
private final Type type;
/** The rank value */
private final Object value;
/** The kinds of rank settings */
public enum Type {
RANKTYPE("rank-type"),
LITERALBOOST("literal-boost"),
WEIGHT("weight"),
PREFERBITVECTOR("preferbitvector",true);
private final String name;
/** True if this setting really pertains to an index, not a field within an index */
private final boolean isIndexLevel;
Type(String name) {
this(name,false);
}
Type(String name,boolean isIndexLevel) {
this.name = name;
this.isIndexLevel=isIndexLevel;
}
/** True if this setting really pertains to an index, not a field within an index */
public boolean isIndexLevel() { return isIndexLevel; }
/** Returns the name of this type */
public String getName() {
return name;
}
public String toString() {
return "type: " + name;
}
}
public RankSetting(String fieldName, RankSetting.Type type, Object value) {
this.fieldName = fieldName;
this.type = type;
this.value = value;
}
public String getFieldName() { return fieldName; }
public Type getType() { return type; }
public Object getValue() { return value; }
/** Returns the value as an int, or a negative value if it is not an integer */
public int getIntValue() {
if (value instanceof Integer) {
return ((Integer)value);
}
else {
return -1;
}
}
@Override
public int hashCode() {
// Consistent with equals: only fieldName and type participate, not value
return fieldName.hashCode() + 17 * type.hashCode();
}
@Override
public boolean equals(Object object) {
if (!(object instanceof RankSetting)) {
return false;
}
// Deliberately ignores value: identity is (fieldName, type), per the class javadoc
RankSetting other = (RankSetting)object;
return
fieldName.equals(other.fieldName) &&
type.equals(other.type);
}
@Override
public String toString() {
return type + " setting " + fieldName + ": " + value;
}
}
/** A rank property. Rank properties are Value Objects */
public static class RankProperty implements Serializable {
private final String name;
private final String value;
public RankProperty(String name, String value) {
this.name = name;
this.value = value;
}
public String getName() { return name; }
public String getValue() { return value; }
@Override
public int hashCode() {
return name.hashCode() + 17 * value.hashCode();
}
@Override
public boolean equals(Object object) {
if (! (object instanceof RankProperty)) return false;
RankProperty other=(RankProperty)object;
return (other.name.equals(this.name) && other.value.equals(this.value));
}
@Override
public String toString() {
return name + " = " + value;
}
}
/** A function in a rank profile */
public static class RankingExpressionFunction {
private ExpressionFunction function;
/** True if this should be inlined into calling expressions. Useful for very cheap functions. */
private final boolean inline;
RankingExpressionFunction(ExpressionFunction function, boolean inline) {
this.function = function;
this.inline = inline;
}
public void setReturnType(TensorType type) {
this.function = function.withReturnType(type);
}
public ExpressionFunction function() { return function; }
/** True if this should be inlined; functions taking arguments are never inlined */
public boolean inline() {
return inline && function.arguments().isEmpty();
}
/** Returns a copy of this with the given expression as body */
RankingExpressionFunction withExpression(RankingExpression expression) {
return new RankingExpressionFunction(function.withBody(expression), inline);
}
@Override
public String toString() {
return "function " + function;
}
}
/** Settings for result diversity within the match phase */
public static final class DiversitySettings {
private String attribute = null;
private int minGroups = 0;
private double cutoffFactor = 10;
private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;
public void setAttribute(String value) { attribute = value; }
public void setMinGroups(int value) { minGroups = value; }
public void setCutoffFactor(double value) { cutoffFactor = value; }
public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { cutoffStrategy = strategy; }
public String getAttribute() { return attribute; }
public int getMinGroups() { return minGroups; }
public double getCutoffFactor() { return cutoffFactor; }
public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }
/** Validates that the required settings have been set. @throws IllegalArgumentException if not */
void checkValid() {
if (attribute == null || attribute.isEmpty()) {
throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
}
if (minGroups <= 0) {
throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
}
if (cutoffFactor < 1.0) {
throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
}
}
}
/** Settings for limiting the number of hits evaluated in the match phase */
public static class MatchPhaseSettings {
private String attribute = null;
private boolean ascending = false;
private int maxHits = 0;
private double maxFilterCoverage = 0.2;
private DiversitySettings diversity = null;
private double evaluationPoint = 0.20;
private double prePostFilterTippingPoint = 1.0;
/** Sets the diversity settings; validates them first. @throws IllegalArgumentException if invalid */
public void setDiversity(DiversitySettings value) {
value.checkValid();
diversity = value;
}
public void setAscending(boolean value) { ascending = value; }
public void setAttribute(String value) { attribute = value; }
public void setMaxHits(int value) { maxHits = value; }
public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }
public boolean getAscending() { return ascending; }
public String getAttribute() { return attribute; }
public int getMaxHits() { return maxHits; }
public double getMaxFilterCoverage() { return maxFilterCoverage; }
public DiversitySettings getDiversity() { return diversity; }
public double getEvaluationPoint() { return evaluationPoint; }
public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }
/** Validates that the required settings have been set. @throws IllegalArgumentException if not */
public void checkValid() {
if (attribute == null) {
throw new IllegalArgumentException("match-phase did not set any attribute");
}
if (! (maxHits > 0)) {
throw new IllegalArgumentException("match-phase did not set max-hits > 0");
}
}
}
/** A simple name-to-type mapping, used for declared attribute and query feature types */
public static class TypeSettings {
private final Map<String, String> types = new HashMap<>();
void addType(String name, String type) {
types.put(name, type);
}
/** Returns an unmodifiable view of the declared types */
public Map<String, String> getTypes() {
return Collections.unmodifiableMap(types);
}
}
} | class RankProfile implements Cloneable {
public final static String FIRST_PHASE = "firstphase";
public final static String SECOND_PHASE = "secondphase";
/** The search definition-unique name of this rank profile */
private final String name;
/** The search definition owning this profile, or null if global (owned by a model) */
private final ImmutableSearch search;
/** The model owning this profile if it is global, or null if it is owned by a search definition */
private final VespaModel model;
/** The name of the rank profile inherited by this */
private String inheritedName = null;
/** The resolved inherited profile, if any */
private RankProfile inherited = null;
/** The match settings of this profile */
private MatchPhaseSettings matchPhaseSettings = null;
/** The rank settings of this profile */
protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();
/** The ranking expression to be used for first phase */
private RankingExpressionFunction firstPhaseRanking = null;
/** The ranking expression to be used for second phase */
private RankingExpressionFunction secondPhaseRanking = null;
/** Number of hits to be reranked in second phase, -1 means use default */
private int rerankCount = -1;
/** Mysterious attribute */
private int keepRankCount = -1;
// For the following three, -1 means unset (use inherited or default)
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
// null means unset (use inherited or default)
private Double termwiseLimit = null;
/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;
// For the feature sets below, null means "not set locally" (distinct from empty)
private Set<ReferenceNode> summaryFeatures;
private String inheritedSummaryFeatures;
private Set<ReferenceNode> rankFeatures;
/** The properties of this - a multimap */
private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();
private Boolean ignoreDefaultRankFeatures = null;
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();
// Cached combined (local + inherited) function view; null means it must be rebuilt
private Map<String, RankingExpressionFunction> allFunctionsCached = null;
private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();
private Set<String> filterFields = new HashSet<>();
private final RankProfileRegistry rankProfileRegistry;
/** Constants in ranking expressions */
private Map<String, Value> constants = new HashMap<>();
private final TypeSettings attributeTypes = new TypeSettings();
private final TypeSettings queryFeatureTypes = new TypeSettings();
// Lazily initialized cache of all fields of the owning search definition
private List<ImmutableSDField> allFieldsList;
/** Global onnx models not tied to a search definition */
private final OnnxModels onnxModels;
private final DeployLogger deployLogger;
/**
 * Creates a new rank profile for a particular search definition
 *
 * @param name the name of the new profile
 * @param search the search definition owning this profile
 * @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} to use for storing
 *        and looking up rank profiles.
 */
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = Objects.requireNonNull(search, "search cannot be null");
// Search-owned profiles have no owning model and use the search's onnx models instead
this.model = null;
this.onnxModels = null;
this.rankProfileRegistry = rankProfileRegistry;
this.deployLogger = search.getDeployLogger();
}
/**
 * Creates a global rank profile
 *
 * @param name the name of the new profile
 * @param model the model owning this profile
 */
public RankProfile(String name, VespaModel model, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
    this.name = Objects.requireNonNull(name, "name cannot be null");
    this.model = Objects.requireNonNull(model, "model cannot be null");
    this.deployLogger = deployLogger;
    this.rankProfileRegistry = rankProfileRegistry;
    this.onnxModels = onnxModels;
    // Global profiles are not owned by any search definition
    this.search = null;
}
/** Returns the name of this rank profile */
public String getName() { return name; }

/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }

/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
    if (search != null) return search.applicationPackage();
    return model.applicationPackage();
}

/** Returns the ranking constants of the owner of this */
public RankingConstants rankingConstants() {
    if (search != null) return search.rankingConstants();
    return model.rankingConstants();
}

/** Returns the ONNX models available to this, by model name */
public Map<String, OnnxModel> onnxModels() {
    if (search != null) return search.onnxModels().asMap();
    return onnxModels.asMap();
}
/** Returns all fields of the owning search, caching the snapshot on first use; empty for global profiles */
private Stream<ImmutableSDField> allFields() {
    if (search == null) return Stream.empty();
    List<ImmutableSDField> fields = allFieldsList;
    if (fields == null) {
        fields = search.allFieldsList();
        allFieldsList = fields;
    }
    return fields.stream();
}

/** Returns all imported fields of the owning search; empty for global profiles */
private Stream<ImmutableSDField> allImportedFields() {
    if (search == null) return Stream.empty();
    return search.allImportedFields();
}
/**
 * Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
 * definition.
 * The name is resolved into an actual profile lazily, via resolveInherited().
 */
public void setInherited(String inheritedName) {
    this.inheritedName = inheritedName;
}

/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }
/** Looks up the profile named by inheritedName across all registered profiles, or null if not found */
private RankProfile resolveIndependentOfInheritance() {
    for (RankProfile candidate : rankProfileRegistry.all()) {
        if (candidate.getName().equals(inheritedName))
            return candidate;
    }
    return null;
}

/** Returns this profile's name, qualified by the owning search's name when there is one */
private String createFullyQualifiedName() {
    if (search == null) return getName();
    return search.getName() + "." + getName();
}

/**
 * Walks up the inheritance chain from parent, collecting qualified names, and fails if the chain
 * ever reaches the profile we started from.
 */
private void verifyNoInheritanceCycle(List<String> children, RankProfile parent) {
    String qualifiedParent = parent.createFullyQualifiedName();
    children.add(qualifiedParent);
    String root = children.get(0);
    if (root.equals(qualifiedParent))
        throw new IllegalArgumentException("There is a cycle in the inheritance for rank-profile '" + root + "' = " + children);
    if (parent.getInherited() != null)
        verifyNoInheritanceCycle(children, parent.getInherited());
}
/**
 * Resolves the inherited profile within a search.
 * When this profile shadows a profile with the same name, the lookup first tries the document type's
 * inherited (base) types; otherwise the profile is resolved against the document type itself, or,
 * with no document type, against the profile registered under the search's own name.
 */
private RankProfile resolveInherited(ImmutableSearch search) {
    SDDocumentType documentType = search.getDocument();
    if (documentType != null) {
        if (name.equals(inheritedName)) {
            // This profile redefines a same-named profile: resolve against base document types first
            for (SDDocumentType baseType : documentType.getInheritedTypes()) {
                RankProfile resolvedFromBase = rankProfileRegistry.resolve(baseType, inheritedName);
                if (resolvedFromBase != null) return resolvedFromBase;
            }
        }
        return rankProfileRegistry.resolve(documentType, inheritedName);
    }
    return rankProfileRegistry.get(search.getName(), inheritedName);
}

/** Resolves the inherited profile: search-scoped profiles resolve within the search, global ones globally */
private RankProfile resolveInherited() {
    if (inheritedName == null) return null;
    return (getSearch() != null)
           ? resolveInherited(search)
           : rankProfileRegistry.getGlobal(inheritedName);
}
/**
 * Returns whether this profile inherits (directly or indirectly) the given profile
 *
 * @param name the profile name to compare this to
 * @return whether or not this inherits from the named profile
 */
public boolean inherits(String name) {
    for (RankProfile ancestor = getInherited(); ancestor != null; ancestor = ancestor.getInherited()) {
        if (ancestor.getName().equals(name))
            return true;
    }
    return false;
}
/** Sets the match phase settings of this, after validating them */
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
    settings.checkValid();
    matchPhaseSettings = settings;
}

/** Returns the match phase settings of this or the nearest inherited profile setting them, or null */
public MatchPhaseSettings getMatchPhaseSettings() {
    if (matchPhaseSettings != null) return matchPhaseSettings;
    RankProfile parent = getInherited();
    return parent != null ? parent.getMatchPhaseSettings() : null;
}
/** Adds a rank setting declared in this profile */
public void addRankSetting(RankSetting rankSetting) {
    rankSettings.add(rankSetting);
}

/** Convenience overload which creates the {@link RankSetting} from its parts */
public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
    addRankSetting(new RankSetting(fieldName, type, value));
}
/**
 * Returns the rank setting of a field declared directly in this profile, or null if there is none
 *
 * @param field the field whose settings to return
 * @param type the type that the field is required to be
 * @return the rank setting found, or null
 */
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
    for (RankSetting candidate : rankSettings) {
        if (candidate.getFieldName().equals(field) && candidate.getType().equals(type))
            return candidate;
    }
    return null;
}
/**
 * Returns a rank setting of field or index, or null if there is no such rank setting in this profile or one it
 * inherits
 *
 * @param field the field whose settings to return
 * @param type the type that the field is required to be
 * @return the rank setting found, or null
 */
public RankSetting getRankSetting(String field, RankSetting.Type type) {
    RankSetting declared = getDeclaredRankSetting(field, type);
    if (declared != null) return declared;
    RankProfile parent = getInherited();
    return parent != null ? parent.getRankSetting(field, type) : null;
}
/**
 * Returns the rank settings declared directly in this rank profile
 *
 * @return an iterator over the declared rank settings
 */
public Iterator<RankSetting> declaredRankSettingIterator() {
    Set<RankSetting> view = Collections.unmodifiableSet(rankSettings);
    return view.iterator();
}

/**
 * Returns all settings in this profile or any profile it inherits
 *
 * @return an iterator over all rank settings of this
 */
public Iterator<RankSetting> rankSettingIterator() {
    return rankSettings().iterator();
}

/**
 * Returns a snapshot of the rank settings of this and everything it inherits.
 * Settings declared here shadow inherited ones with the same identity (field name + type).
 * Changes to the returned set will not be reflected in this rank profile.
 */
public Set<RankSetting> rankSettings() {
    Set<RankSetting> snapshot = new LinkedHashSet<>(rankSettings);
    RankProfile parent = getInherited();
    if (parent != null)
        snapshot.addAll(parent.rankSettings());
    return snapshot;
}
/** Adds a constant usable in ranking expressions; tensor constants must have fully sized dimensions */
public void addConstant(String name, Value value) {
    if (value instanceof TensorValue) {
        TensorType type = value.type();
        boolean hasUnsizedIndexedDimension = type.dimensions().stream()
                .anyMatch(dimension -> dimension.isIndexed() && dimension.size().isEmpty());
        if (hasUnsizedIndexedDimension)
            throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
                                               ": Dense tensor dimensions must have a size");
    }
    constants.put(name, value.freeze());
}

/** Adds a tensor constant; see {@link #addConstant} */
public void addConstantTensor(String name, TensorValue value) {
    addConstant(name, value);
}
/** Returns an unmodifiable view of the constants available in this; locally declared constants shadow inherited ones */
public Map<String, Value> getConstants() {
    RankProfile parent = getInherited();
    if (constants.isEmpty())
        return parent != null ? parent.getConstants() : Collections.emptyMap();
    if (parent == null || parent.getConstants().isEmpty())
        return Collections.unmodifiableMap(constants);
    Map<String, Value> combined = new HashMap<>(parent.getConstants());
    combined.putAll(constants);
    return combined;
}
/** Declares the type of an attribute, for use in type resolving of ranking expressions */
public void addAttributeType(String attributeName, String attributeType) {
    attributeTypes.addType(attributeName, attributeType);
}

/** Returns the declared attribute types, by attribute name */
public Map<String, String> getAttributeTypes() {
    return attributeTypes.getTypes();
}

/** Declares the type of a query feature, for use in type resolving of ranking expressions */
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
    queryFeatureTypes.addType(queryFeature, queryFeatureType);
}

/** Returns the declared query feature types, by feature name */
public Map<String, String> getQueryFeatureTypes() {
    return queryFeatureTypes.getTypes();
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getFirstPhaseRanking() {
    RankingExpressionFunction firstPhase = getFirstPhase();
    return firstPhase == null ? null : firstPhase.function().getBody();
}

/** Returns the first phase function of this or the nearest inherited profile setting it, or null */
public RankingExpressionFunction getFirstPhase() {
    if (firstPhaseRanking != null) return firstPhaseRanking;
    RankProfile inherited = getInherited();
    return inherited == null ? null : inherited.getFirstPhase();
}

void setFirstPhaseRanking(RankingExpression rankingExpression) {
    ExpressionFunction function = new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression);
    this.firstPhaseRanking = new RankingExpressionFunction(function, false);
}

/** Parses and sets the first phase ranking expression of this */
public void setFirstPhaseRanking(String expression) {
    try {
        ExpressionFunction parsed = parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression);
        this.firstPhaseRanking = new RankingExpressionFunction(parsed, false);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Illegal first phase ranking function", e);
    }
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getSecondPhaseRanking() {
    RankingExpressionFunction secondPhase = getSecondPhase();
    return secondPhase == null ? null : secondPhase.function().getBody();
}

/** Returns the second phase function of this or the nearest inherited profile setting it, or null */
public RankingExpressionFunction getSecondPhase() {
    if (secondPhaseRanking != null) return secondPhaseRanking;
    RankProfile inherited = getInherited();
    return inherited == null ? null : inherited.getSecondPhase();
}

/** Parses and sets the second phase ranking expression of this */
public void setSecondPhaseRanking(String expression) {
    try {
        ExpressionFunction parsed = parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression);
        this.secondPhaseRanking = new RankingExpressionFunction(parsed, false);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Illegal second phase ranking function", e);
    }
}
/** Returns a read-only view of the summary features to use in this profile. This is never null */
public Set<ReferenceNode> getSummaryFeatures() {
    if (inheritedSummaryFeatures != null && summaryFeatures != null) {
        // Explicitly inheriting the parent's summary features: return the union
        Set<ReferenceNode> combined = new HashSet<>(getInherited().getSummaryFeatures());
        combined.addAll(summaryFeatures);
        return Collections.unmodifiableSet(combined);
    }
    if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
    RankProfile parent = getInherited();
    return parent != null ? parent.getSummaryFeatures() : Set.of();
}

private void addSummaryFeature(ReferenceNode feature) {
    if (summaryFeatures == null)
        summaryFeatures = new LinkedHashSet<>();
    summaryFeatures.add(feature);
}

/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
    features.forEach(this::addSummaryFeature);
}
/**
 * Sets the name of the profile this should inherit the summary features of.
 * Without setting this, this will either have the summary features of the parent,
 * or if summary features are set in this, only have the summary features in this.
 * With this set the resulting summary features of this will be the superset of those defined in this and
 * the final (with inheritance included) summary features of the given parent.
 * The profile must be the profile which is directly inherited by this.
 *
 * @param parentProfile the name of the directly inherited profile
 * @throws IllegalArgumentException if parentProfile is not the profile this directly inherits
 */
public void setInheritedSummaryFeatures(String parentProfile) {
    // Fixed: error message had a typo ("attemtping") and unbalanced quotes around the two names
    if ( ! parentProfile.equals(inheritedName))
        throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
                                           inheritedName + "', but attempting to inherit '" + parentProfile + "'");
    this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
    if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
    RankProfile parent = getInherited();
    return parent != null ? parent.getRankFeatures() : Collections.emptySet();
}

private void addRankFeature(ReferenceNode feature) {
    if (rankFeatures == null)
        rankFeatures = new LinkedHashSet<>();
    rankFeatures.add(feature);
}

/**
 * Adds the content of the given feature list to the internal list of rank features.
 *
 * @param features the features to add
 */
public void addRankFeatures(FeatureList features) {
    features.forEach(this::addRankFeature);
}
/** Returns a read only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
    List<RankProperty> properties = new ArrayList<>();
    for (List<RankProperty> propertyList : getRankPropertyMap().values())
        properties.addAll(propertyList);
    return Collections.unmodifiableList(properties);
}

/** Returns a read only map view of the rank properties to use in this profile. This is never null. */
public Map<String, List<RankProperty>> getRankPropertyMap() {
    // Use isEmpty() rather than size() == 0 (idiomatic and intention-revealing)
    RankProfile parent = getInherited();
    if (rankProperties.isEmpty() && parent == null) return Collections.emptyMap();
    if (rankProperties.isEmpty()) return parent.getRankPropertyMap();
    if (parent == null) return Collections.unmodifiableMap(rankProperties);
    // Properties declared in this profile shadow inherited entries with the same name
    Map<String, List<RankProperty>> combined = new LinkedHashMap<>(parent.getRankPropertyMap());
    combined.putAll(rankProperties);
    return Collections.unmodifiableMap(combined);
}

public void addRankProperty(String name, String parameter) {
    addRankProperty(new RankProperty(name, parameter));
}

private void addRankProperty(RankProperty rankProperty) {
    // Multimap semantics: multiple properties may share the same name
    rankProperties.computeIfAbsent(rankProperty.getName(), (String key) -> new ArrayList<>(1)).add(rankProperty);
}
/** Returns a short description of this, for use in error messages and logging */
@Override
public String toString() {
    return "rank profile '" + getName() + "'";
}
/** Returns the rerank count of this or the nearest inherited profile setting it; negative means "use default" */
public int getRerankCount() {
    if (rerankCount >= 0) return rerankCount;
    RankProfile parent = getInherited();
    return parent != null ? parent.getRerankCount() : rerankCount;
}

/** Returns the per-search thread count of this or the nearest inherited profile setting it; negative means unset */
public int getNumThreadsPerSearch() {
    if (numThreadsPerSearch >= 0) return numThreadsPerSearch;
    RankProfile parent = getInherited();
    return parent != null ? parent.getNumThreadsPerSearch() : numThreadsPerSearch;
}

public void setNumThreadsPerSearch(int numThreads) {
    this.numThreadsPerSearch = numThreads;
}

/** Returns the min hits per thread of this or the nearest inherited profile setting it; negative means unset */
public int getMinHitsPerThread() {
    if (minHitsPerThread >= 0) return minHitsPerThread;
    RankProfile parent = getInherited();
    return parent != null ? parent.getMinHitsPerThread() : minHitsPerThread;
}

public void setMinHitsPerThread(int minHits) {
    this.minHitsPerThread = minHits;
}

public void setNumSearchPartitions(int numSearchPartitions) {
    this.numSearchPartitions = numSearchPartitions;
}

/** Returns the search partition count of this or the nearest inherited profile setting it; negative means unset */
public int getNumSearchPartitions() {
    if (numSearchPartitions >= 0) return numSearchPartitions;
    RankProfile parent = getInherited();
    return parent != null ? parent.getNumSearchPartitions() : numSearchPartitions;
}

/** Returns the termwise limit of this or the nearest inherited profile setting it, or empty */
public OptionalDouble getTermwiseLimit() {
    if (termwiseLimit != null) return OptionalDouble.of(termwiseLimit);
    RankProfile parent = getInherited();
    return parent != null ? parent.getTermwiseLimit() : OptionalDouble.empty();
}

public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }

/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
    this.rerankCount = rerankCount;
}
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
    this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}

/** Returns whether to ignore default rank features, resolving through inherited profiles; default false */
public boolean getIgnoreDefaultRankFeatures() {
    if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
    RankProfile parent = getInherited();
    return parent != null && parent.getIgnoreDefaultRankFeatures();
}
/** Parses and adds a function */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
    ExpressionFunction parsed;
    try {
        parsed = parseRankingExpression(name, arguments, expression);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
    }
    addFunction(parsed, inline);
}

/** Adds a function and returns it */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
    RankingExpressionFunction wrapped = new RankingExpressionFunction(function, inline);
    functions.put(function.getName(), wrapped);
    allFunctionsCached = null; // invalidate the merged-function cache
    return wrapped;
}
/**
 * Use for rank profiles representing a model evaluation; it will assume
 * that a input is provided with the declared type (for the purpose of
 * type resolving).
 **/
public void addInputFeature(String name, TensorType declaredType) {
    Reference reference = Reference.fromIdentifier(name);
    if (inputFeatures.containsKey(reference)) {
        TensorType existingType = inputFeatures.get(reference);
        if ( ! declaredType.equals(existingType))
            throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
                                               existingType+" -> "+declaredType);
    }
    inputFeatures.put(reference, declaredType);
}

/** Returns the function with the given name in this or an inherited profile, or null if none */
public RankingExpressionFunction findFunction(String name) {
    RankingExpressionFunction declared = functions.get(name);
    if (declared != null) return declared;
    RankProfile parent = getInherited();
    return parent != null ? parent.findFunction(name) : null;
}
/** Returns an unmodifiable snapshot of the functions in this */
public Map<String, RankingExpressionFunction> getFunctions() {
    if (needToUpdateFunctionCache())
        allFunctionsCached = gatherAllFunctions();
    return allFunctionsCached;
}

/** Merges inherited functions with those declared here; locally declared functions shadow inherited ones */
private Map<String, RankingExpressionFunction> gatherAllFunctions() {
    RankProfile parent = getInherited();
    if (functions.isEmpty())
        return parent == null ? Collections.emptyMap() : parent.getFunctions();
    if (parent == null)
        return Collections.unmodifiableMap(new LinkedHashMap<>(functions));
    Map<String, RankingExpressionFunction> merged = new LinkedHashMap<>(parent.getFunctions());
    merged.putAll(functions);
    return Collections.unmodifiableMap(merged);
}

/** Returns whether the function cache of this or any inherited profile has been invalidated */
private boolean needToUpdateFunctionCache() {
    if (allFunctionsCached == null) return true;
    RankProfile parent = getInherited();
    return parent != null && parent.needToUpdateFunctionCache();
}
/** Returns the keep rank count of this or the nearest inherited profile setting it, or -1 if unset */
public int getKeepRankCount() {
    if (keepRankCount >= 0) return keepRankCount;
    if (getInherited() != null) return getInherited().getKeepRankCount();
    return -1;
}

public void setKeepRankCount(int rerankArraySize) {
    this.keepRankCount = rerankArraySize;
}

/** Returns the rank score drop limit of this or the nearest inherited profile setting it; -Double.MAX_VALUE means unset */
public double getRankScoreDropLimit() {
    // Fixed formatting: was written 'rankScoreDropLimit >- Double.MAX_VALUE', which parses as
    // '> -Double.MAX_VALUE' but misleadingly reads as '>='. Behavior is unchanged.
    if (rankScoreDropLimit > -Double.MAX_VALUE) return rankScoreDropLimit;
    if (getInherited() != null) return getInherited().getRankScoreDropLimit();
    return rankScoreDropLimit;
}

public void setRankScoreDropLimit(double rankScoreDropLimit) {
    this.rankScoreDropLimit = rankScoreDropLimit;
}
/** Returns the filter fields declared directly in this profile (a mutable reference) */
public Set<String> filterFields() {
    return filterFields;
}

/**
 * Returns all filter fields in this profile and any profile it inherits.
 *
 * @return the set of all filter fields
 */
public Set<String> allFilterFields() {
    Set<String> combined = new LinkedHashSet<>();
    RankProfile parent = getInherited();
    if (parent != null)
        combined.addAll(parent.allFilterFields());
    combined.addAll(filterFields());
    return combined;
}
/**
 * Parses the given expression text into an ExpressionFunction.
 *
 * @param name the name of the function/phase the expression belongs to (used in error messages)
 * @param arguments the formal argument names of the function
 * @param expression the expression text, or a 'file:...' reference (see openRankingExpressionReader)
 * @throws ParseException if the expression is empty or cannot be parsed; the underlying
 *         searchlib ParseException is attached as the cause
 */
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
    if (expression.trim().length() == 0)
        throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");
    // try-with-resources: the reader may be file-backed (see openRankingExpressionReader)
    try (Reader rankingExpressionReader = openRankingExpressionReader(name, expression.trim())) {
        return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
    }
    catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
        // Wrap the searchlib ParseException in this module's ParseException, preserving the cause chain
        ParseException exception = new ParseException("Could not parse ranking expression '" + expression.trim() +
                                                      "' in " + getName()+ ", " + name + ".");
        throw (ParseException)exception.initCause(e);
    }
    catch (IOException e) {
        // NOTE(review): the IOException cause is dropped here — consider chaining it; left as-is
        throw new RuntimeException("IOException parsing ranking expression '" + name + "'");
    }
}
/** Strips the 'file:' prefix and ensures the rank expression file suffix is present */
private static String extractFileName(String expression) {
    String fileName = expression.substring("file:".length()).trim();
    if (fileName.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX)) return fileName;
    return fileName + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
}

/** Returns a reader over the expression text, resolving 'file:' references through the owning search */
private Reader openRankingExpressionReader(String expName, String expression) {
    if ( ! expression.startsWith("file:")) return new StringReader(expression);
    String fileName = extractFileName(expression);
    File file = new File(fileName);
    boolean referencesSubdirectory = ! file.isAbsolute() && file.getPath().contains("/");
    if (referencesSubdirectory)
        throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
                                           "' in subdirectory, which is not supported.");
    return search.getRankingExpression(fileName);
}
/** Shallow clones this */
@Override
public RankProfile clone() {
    try {
        RankProfile clone = (RankProfile)super.clone();
        // Copy every mutable container so later modification of the clone (e.g. during compile())
        // does not affect this profile. The contained objects themselves are shared (shallow clone).
        clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
        clone.matchPhaseSettings = this.matchPhaseSettings; // shared reference — assumed not mutated after set; confirm
        clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
        clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
        clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
        clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
        clone.functions = new LinkedHashMap<>(this.functions);
        clone.allFunctionsCached = null; // recomputed on demand
        clone.filterFields = new HashSet<>(this.filterFields);
        clone.constants = new HashMap<>(this.constants);
        return clone;
    }
    catch (CloneNotSupportedException e) {
        // Cannot happen: this class implements Cloneable via its hierarchy
        throw new RuntimeException("Won't happen", e);
    }
}
/**
 * Returns a copy of this where the content is optimized for execution.
 * Compiled profiles should never be modified.
 */
public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
    try {
        RankProfile compiledProfile = this.clone();
        compiledProfile.compileThis(queryProfiles, importedModels);
        return compiledProfile;
    }
    catch (IllegalArgumentException e) {
        // Add the profile name for context before propagating
        throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
    }
}
/**
 * Compiles this profile in place: transforms the first/second phase expressions and all functions.
 * Inline functions are compiled first (with no other functions available) since the
 * other expressions are compiled with the inline functions passed in.
 */
private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
    checkNameCollisions(getFunctions(), getConstants());
    ExpressionTransforms expressionTransforms = new ExpressionTransforms();
    Map<Reference, TensorType> featureTypes = collectFeatureTypes();
    // First pass: compile inline functions with no function context
    Map<String, RankingExpressionFunction> inlineFunctions =
            compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);
    firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
    secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
    // Second pass: compile all functions, with the already-compiled inline functions available
    functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
    allFunctionsCached = null; // the function map changed; invalidate the cache
}
/** Fails if any function shares a name with a constant, since both are referable by name */
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
    for (String functionName : functions.keySet()) {
        if (constants.containsKey(functionName))
            throw new IllegalArgumentException("Cannot have both a constant and function named '" +
                                               functionName + "'");
    }
}

/** Returns the subset of this profile's functions which may be inlined */
private Map<String, RankingExpressionFunction> getInlineFunctions() {
    return getFunctions().entrySet().stream()
                         .filter(entry -> entry.getValue().inline())
                         .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
/**
 * Compiles all functions supplied, one at a time, re-reading the supplier between iterations
 * since compiling one function may add rank properties/functions visible to the next.
 */
private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
                                                                QueryProfileRegistry queryProfiles,
                                                                Map<Reference, TensorType> featureTypes,
                                                                ImportedMlModels importedModels,
                                                                Map<String, RankingExpressionFunction> inlineFunctions,
                                                                ExpressionTransforms expressionTransforms) {
    Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
    while (true) {
        Map.Entry<String, RankingExpressionFunction> next = findUncompiledFunction(functions.get(), compiledFunctions.keySet());
        if (next == null) break;
        RankingExpressionFunction compiled = compile(next.getValue(), queryProfiles, featureTypes,
                                                     importedModels, getConstants(), inlineFunctions, expressionTransforms);
        compiledFunctions.put(next.getKey(), compiled);
    }
    return compiledFunctions;
}
/** Returns the first function entry not yet compiled, or null when all are done */
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
                                                                                   Set<String> compiledFunctionNames) {
    return functions.entrySet().stream()
                    .filter(entry -> ! compiledFunctionNames.contains(entry.getKey()))
                    .findFirst()
                    .orElse(null);
}
/**
 * Compiles a single function by applying all expression transforms to its body.
 * Side effect: rank properties produced by the transform context are added to this profile.
 * Returns null when given null.
 */
private RankingExpressionFunction compile(RankingExpressionFunction function,
                                          QueryProfileRegistry queryProfiles,
                                          Map<Reference, TensorType> featureTypes,
                                          ImportedMlModels importedModels,
                                          Map<String, Value> constants,
                                          Map<String, RankingExpressionFunction> inlineFunctions,
                                          ExpressionTransforms expressionTransforms) {
    if (function == null) return null;
    RankProfileTransformContext context = new RankProfileTransformContext(this,
                                                                          queryProfiles,
                                                                          featureTypes,
                                                                          importedModels,
                                                                          constants,
                                                                          inlineFunctions);
    RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
    // Register any rank properties the transforms accumulated on the context
    for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
        addRankProperty(rankProperty.getKey(), rankProperty.getValue());
    }
    return function.withExpression(expression);
}
/**
 * Creates a context containing the type information of all constants, attributes and query profiles
 * referable from this rank profile.
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
    return typeContext(queryProfiles, collectFeatureTypes());
}

/** Convenience overload using an empty query profile registry */
public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }

/** Collects the types of all declared input features and of all (imported) attribute fields */
private Map<Reference, TensorType> collectFeatureTypes() {
    // Seed from the declared input features via the copy constructor (was an element-wise forEach/put)
    Map<Reference, TensorType> featureTypes = new HashMap<>(inputFeatures);
    allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    return featureTypes;
}
/**
 * Creates a type context from the given feature types, adding types for constants, query features
 * (from query profile types) and ONNX model outputs referable from this profile.
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
    MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
                                                                                  .map(RankingExpressionFunction::function)
                                                                                  .collect(Collectors.toList()),
                                                                    featureTypes);
    // Add both profile-local constants and global ranking constants as constant(...) features
    getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
    rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));
    // Add query feature types from all query profile types. When a query feature is declared in
    // several profiles its types are generalized dimensionwise, failing if they are incompatible.
    for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
        for (FieldDescription field : queryProfileType.declaredFields().values()) {
            TensorType type = field.getType().asTensorType();
            Optional<Reference> feature = Reference.simple(field.getName());
            if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;
            TensorType existingType = context.getType(feature.get());
            // Only generalize against a previously *declared* type, not the context's default
            if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
                type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
                        new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
                                                     " with type " + field.getType().asTensorType() +
                                                     ", but this is already defined in another query profile with type " +
                                                     context.getType(feature.get())));
            context.setType(feature.get(), type);
        }
    }
    // Add the output types of all referable ONNX models, both the default output and mapped outputs
    for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
        String modelName = entry.getKey();
        OnnxModel model = entry.getValue();
        Arguments args = new Arguments(new ReferenceNode(modelName));
        Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);
        TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
        context.setType(new Reference("onnxModel", args, null), defaultOutputType);
        for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
            TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
            context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
        }
    }
    return context;
}
/** Resolves the tensor type of each ONNX model input that can be resolved; unresolvable inputs are omitted */
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
    Map<String, TensorType> inputTypes = new HashMap<>();
    model.getInputMap().keySet().forEach(inputName ->
            resolveOnnxInputType(inputName, model, context).ifPresent(type -> inputTypes.put(inputName, type)));
    return inputTypes;
}
/**
 * Resolves the tensor type of a single ONNX model input.
 * The mapped source may be a simple reference, whose type is taken from the context, or a
 * rankingExpression(name) wrapper / plain function name, in which case the type is the type
 * of that function's body. Returns empty when the input has no mapping or cannot be resolved.
 */
private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
    String source = model.getInputMap().get(onnxInputName);
    if (source != null) {
        Optional<Reference> reference = Reference.simple(source);
        if (reference.isPresent()) {
            if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
                // Unwrap rankingExpression(name) so the named function is looked up below
                source = reference.get().simpleArgument().get();
            } else {
                return Optional.of(context.getType(reference.get()));
            }
        }
        ExpressionFunction func = context.getFunction(source);
        if (func != null) {
            return Optional.of(func.getBody().type(context));
        }
    }
    return Optional.empty();
}
/** Adds an attribute(name) feature type for each attribute of the field; empty type when none is declared */
private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
    Attribute primaryAttribute = field.getAttribute();
    field.getAttributes().forEach((attributeName, attribute) -> {
        // The field's primary attribute is exposed under the field's own name
        String featureName = (attribute == primaryAttribute) ? field.getName() : attributeName;
        featureTypes.put(FeatureNames.asAttributeFeature(featureName),
                         attribute.tensorType().orElse(TensorType.empty));
    });
}
/**
 * A rank setting. The identity of a rank setting is its field name and type (not value).
 * A rank setting is immutable.
 */
public static class RankSetting implements Serializable {

    private final String fieldName;

    private final Type type;

    /** The rank value */
    private final Object value;

    public enum Type {

        RANKTYPE("rank-type"),
        LITERALBOOST("literal-boost"),
        WEIGHT("weight"),
        PREFERBITVECTOR("preferbitvector", true);

        private final String name;

        /** True if this setting really pertains to an index, not a field within an index */
        private final boolean isIndexLevel;

        Type(String name) {
            this(name, false);
        }

        Type(String name, boolean isIndexLevel) {
            this.name = name;
            this.isIndexLevel = isIndexLevel;
        }

        /** True if this setting really pertains to an index, not a field within an index */
        public boolean isIndexLevel() { return isIndexLevel; }

        /** Returns the name of this type */
        public String getName() { return name; }

        @Override
        public String toString() {
            return "type: " + name;
        }

    }

    public RankSetting(String fieldName, RankSetting.Type type, Object value) {
        this.fieldName = fieldName;
        this.type = type;
        this.value = value;
    }

    public String getFieldName() { return fieldName; }

    public Type getType() { return type; }

    public Object getValue() { return value; }

    /** Returns the value as an int, or a negative value if it is not an integer */
    public int getIntValue() {
        return (value instanceof Integer) ? (Integer) value : -1;
    }

    @Override
    public int hashCode() {
        return fieldName.hashCode() + 17 * type.hashCode();
    }

    @Override
    public boolean equals(Object object) {
        if ( ! (object instanceof RankSetting)) return false;
        RankSetting other = (RankSetting) object;
        // Identity is field name + type; value is intentionally excluded
        return fieldName.equals(other.fieldName) && type.equals(other.type);
    }

    @Override
    public String toString() {
        return type + " setting " + fieldName + ": " + value;
    }

}
/** A rank property. Rank properties are Value Objects */
public static class RankProperty implements Serializable {

    private final String name;
    private final String value;

    public RankProperty(String name, String value) {
        this.name = name;
        this.value = value;
    }

    public String getName() { return name; }

    public String getValue() { return value; }

    @Override
    public int hashCode() {
        return name.hashCode() + 17 * value.hashCode();
    }

    @Override
    public boolean equals(Object object) {
        if ( ! (object instanceof RankProperty)) return false;
        RankProperty other = (RankProperty) object;
        return name.equals(other.name) && value.equals(other.value);
    }

    @Override
    public String toString() {
        return name + " = " + value;
    }

}
/** A function in a rank profile */
public static class RankingExpressionFunction {
// Not final: replaced with a typed copy when a return type is assigned
private ExpressionFunction function;
/** True if this should be inlined into calling expressions. Useful for very cheap functions. */
private final boolean inline;
RankingExpressionFunction(ExpressionFunction function, boolean inline) {
this.function = function;
this.inline = inline;
}
/** Assigns the return type by replacing the wrapped function with a copy carrying the type */
public void setReturnType(TensorType type) {
this.function = function.withReturnType(type);
}
public ExpressionFunction function() { return function; }
/** Returns whether this should be inlined; only functions without arguments are ever inlined */
public boolean inline() {
return inline && function.arguments().isEmpty();
}
/** Returns a copy of this with the function body replaced by the given expression */
RankingExpressionFunction withExpression(RankingExpression expression) {
return new RankingExpressionFunction(function.withBody(expression), inline);
}
@Override
public String toString() {
return "function " + function;
}
}
/** The diversity settings of a match phase. Must pass checkValid before being used. */
public static final class DiversitySettings {
private String attribute = null;
private int minGroups = 0;
private double cutoffFactor = 10;
private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;
public void setAttribute(String value) { attribute = value; }
public void setMinGroups(int value) { minGroups = value; }
public void setCutoffFactor(double value) { cutoffFactor = value; }
public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { cutoffStrategy = strategy; }
public String getAttribute() { return attribute; }
public int getMinGroups() { return minGroups; }
public double getCutoffFactor() { return cutoffFactor; }
public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }
/** Throws IllegalArgumentException unless a non-empty attribute, min-groups > 0 and cutoff factor >= 1.0 are set */
void checkValid() {
if (attribute == null || attribute.isEmpty()) {
throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
}
if (minGroups <= 0) {
throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
}
if (cutoffFactor < 1.0) {
throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
}
}
}
/** The match-phase settings of a rank profile. Must pass checkValid before being used. */
public static class MatchPhaseSettings {
private String attribute = null;
private boolean ascending = false;
private int maxHits = 0; // 0 means unset; checkValid requires a positive value
private double maxFilterCoverage = 0.2;
private DiversitySettings diversity = null;
private double evaluationPoint = 0.20;
private double prePostFilterTippingPoint = 1.0;
/** Sets the diversity settings; they are validated before being assigned */
public void setDiversity(DiversitySettings value) {
value.checkValid();
diversity = value;
}
public void setAscending(boolean value) { ascending = value; }
public void setAttribute(String value) { attribute = value; }
public void setMaxHits(int value) { maxHits = value; }
public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }
public boolean getAscending() { return ascending; }
public String getAttribute() { return attribute; }
public int getMaxHits() { return maxHits; }
public double getMaxFilterCoverage() { return maxFilterCoverage; }
public DiversitySettings getDiversity() { return diversity; }
public double getEvaluationPoint() { return evaluationPoint; }
public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }
/** Throws IllegalArgumentException unless an attribute and max-hits > 0 are set */
public void checkValid() {
if (attribute == null) {
throw new IllegalArgumentException("match-phase did not set any attribute");
}
if (! (maxHits > 0)) {
throw new IllegalArgumentException("match-phase did not set max-hits > 0");
}
}
}
/** A mapping from names (attributes or query features) to their declared type strings */
public static class TypeSettings {
private final Map<String, String> types = new HashMap<>();
void addType(String name, String type) {
types.put(name, type);
}
/** Returns an unmodifiable view of the registered types */
public Map<String, String> getTypes() {
return Collections.unmodifiableMap(types);
}
}
}
Thanks, will do. | private RankProfile getInherited() {
if (inheritedName == null) return null;
if (inherited == null) {
inherited = resolveInherited();
if (inherited == null) {
String msg = "rank-profile '" + getName() + "' inherits '" + inheritedName +
"', but it does not exist anywhere in the inheritance of search '" +
((getSearch() != null) ? getSearch().getName() : " global rank profiles") + "'.";
if (search.getDeployProperties().featureFlags().enforceRankProfileInheritance()) {
throw new IllegalArgumentException(msg);
} else {
deployLogger.log(Level.WARNING, msg);
inherited = resolveIndependentOfInheritance();
}
} else {
List<String> children = new ArrayList<>();
children.add(createFullyQualifiedName());
verifyNoInheritanceCycle(children, inherited);
}
}
return inherited;
} | deployLogger.log(Level.WARNING, msg); | private RankProfile getInherited() {
if (inheritedName == null) return null;
if (inherited == null) {
inherited = resolveInherited();
if (inherited == null) {
String msg = "rank-profile '" + getName() + "' inherits '" + inheritedName +
"', but it does not exist anywhere in the inheritance of search '" +
((getSearch() != null) ? getSearch().getName() : " global rank profiles") + "'.";
if (search.getDeployProperties().featureFlags().enforceRankProfileInheritance()) {
throw new IllegalArgumentException(msg);
} else {
deployLogger.log(Level.WARNING, msg);
inherited = resolveIndependentOfInheritance();
}
} else {
List<String> children = new ArrayList<>();
children.add(createFullyQualifiedName());
verifyNoInheritanceCycle(children, inherited);
}
}
return inherited;
} | class RankProfile implements Cloneable {
public final static String FIRST_PHASE = "firstphase";
public final static String SECOND_PHASE = "secondphase";
/** The search definition-unique name of this rank profile */
private final String name;
/** The search definition owning this profile, or null if global (owned by a model) */
private final ImmutableSearch search;
/** The model owning this profile if it is global, or null if it is owned by a search definition */
private final VespaModel model;
/** The name of the rank profile inherited by this */
private String inheritedName = null;
/** The resolved inherited profile, cached by getInherited; null until resolved */
private RankProfile inherited = null;
/** The match settings of this profile */
private MatchPhaseSettings matchPhaseSettings = null;
/** The rank settings of this profile */
protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();
/** The ranking expression to be used for first phase */
private RankingExpressionFunction firstPhaseRanking = null;
/** The ranking expression to be used for second phase */
private RankingExpressionFunction secondPhaseRanking = null;
/** Number of hits to be reranked in second phase, -1 means use default */
private int rerankCount = -1;
/** The rerank-array size ("keep rank count"); -1 means not set here (inherit or use the default) */
private int keepRankCount = -1;
// For these, a negative/null value means "not set here": fall back to the inherited profile or default
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
private Double termwiseLimit = null;
/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;
// For the feature sets, null means "inherit from the parent profile"
private Set<ReferenceNode> summaryFeatures;
private String inheritedSummaryFeatures;
private Set<ReferenceNode> rankFeatures;
/** The properties of this - a multimap */
private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();
private Boolean ignoreDefaultRankFeatures = null;
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();
// Cache of this profile's functions merged with inherited ones; invalidated by setting to null
private Map<String, RankingExpressionFunction> allFunctionsCached = null;
private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();
private Set<String> filterFields = new HashSet<>();
private final RankProfileRegistry rankProfileRegistry;
/** Constants in ranking expressions */
private Map<String, Value> constants = new HashMap<>();
private final TypeSettings attributeTypes = new TypeSettings();
private final TypeSettings queryFeatureTypes = new TypeSettings();
// Lazily fetched from the owning search; see allFields()
private List<ImmutableSDField> allFieldsList;
/** Global onnx models not tied to a search definition */
private final OnnxModels onnxModels;
private final DeployLogger deployLogger;
/**
 * Creates a new rank profile for a particular search definition
 *
 * @param name the name of the new profile
 * @param search the search definition owning this profile
 * @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} to use for storing
 *                            and looking up rank profiles
 */
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = Objects.requireNonNull(search, "search cannot be null");
this.model = null;
this.onnxModels = null;
this.rankProfileRegistry = rankProfileRegistry;
this.deployLogger = search.getDeployLogger();
}
/**
 * Creates a global rank profile, owned by a model rather than a search definition
 *
 * @param name the name of the new profile
 * @param model the model owning this profile
 */
public RankProfile(String name, VespaModel model, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = null;
this.model = Objects.requireNonNull(model, "model cannot be null");
this.rankProfileRegistry = rankProfileRegistry;
this.onnxModels = onnxModels;
this.deployLogger = deployLogger;
}
/** Returns the name of this rank profile */
public String getName() { return name; }
/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }
/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
return search != null ? search.applicationPackage() : model.applicationPackage();
}
/** Returns the ranking constants of the owner of this */
public RankingConstants rankingConstants() {
return search != null ? search.rankingConstants() : model.rankingConstants();
}
/** Returns the onnx models of the owner of this (the search if owned by one, otherwise the global models) */
public Map<String, OnnxModel> onnxModels() {
return search != null ? search.onnxModels().asMap() : onnxModels.asMap();
}
// Lazily fetches and caches the owning search's field list; empty for global profiles
private Stream<ImmutableSDField> allFields() {
if (search == null) return Stream.empty();
if (allFieldsList == null) {
allFieldsList = search.allFieldsList();
}
return allFieldsList.stream();
}
private Stream<ImmutableSDField> allImportedFields() {
return search != null ? search.allImportedFields() : Stream.empty();
}
/**
 * Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
 * definition
 */
public void setInherited(String inheritedName) {
this.inheritedName = inheritedName;
}
/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }
/** Returns any profile in the registry with the inherited name, ignoring inheritance rules, or null if none */
private RankProfile resolveIndependentOfInheritance() {
for (RankProfile rankProfile : rankProfileRegistry.all()) {
if (rankProfile.getName().equals(inheritedName)) return rankProfile;
}
return null;
}
/** Returns "searchName.profileName", or just the profile name for global profiles */
private String createFullyQualifiedName() {
return (search != null)
? (search.getName() + "." + getName())
: getName();
}
/**
 * Walks the inheritance chain from the given parent and throws IllegalArgumentException if it
 * ever leads back to the profile whose fully qualified name is first in the children list.
 *
 * @param children the fully qualified names visited so far, starting with the profile being verified
 * @param parent the next parent in the chain to check
 */
private void verifyNoInheritanceCycle(List<String> children, RankProfile parent) {
children.add(parent.createFullyQualifiedName());
String root = children.get(0);
if (root.equals(parent.createFullyQualifiedName())) {
throw new IllegalArgumentException("There is a cycle in the inheritance for rank-profile '" + root + "' = " + children);
}
if (parent.getInherited() != null) {
verifyNoInheritanceCycle(children, parent.getInherited());
}
}
/** Resolves the inherited profile within the given search, consulting the document type hierarchy when present */
private RankProfile resolveInherited(ImmutableSearch search) {
SDDocumentType documentType = search.getDocument();
if (documentType != null) {
// A profile inheriting its own name is resolved in the inherited document types first,
// presumably to avoid resolving to this profile itself — TODO confirm
if (name.equals(inheritedName)) {
for (SDDocumentType baseType : documentType.getInheritedTypes()) {
RankProfile resolvedFromBase = rankProfileRegistry.resolve(baseType, inheritedName);
if (resolvedFromBase != null) return resolvedFromBase;
}
}
return rankProfileRegistry.resolve(documentType, inheritedName);
}
return rankProfileRegistry.get(search.getName(), inheritedName);
}
/** Resolves the inherited profile: within the owning search if any, otherwise among global profiles */
private RankProfile resolveInherited() {
if (inheritedName == null) return null;
return (getSearch() != null)
? resolveInherited(search)
: rankProfileRegistry.getGlobal(inheritedName);
}
/**
 * Returns whether this profile inherits (directly or indirectly) the given profile.
 *
 * @param name the profile name to compare this to.
 * @return whether or not this inherits from the named profile.
 */
public boolean inherits(String name) {
    for (RankProfile ancestor = getInherited(); ancestor != null; ancestor = ancestor.getInherited()) {
        if (ancestor.getName().equals(name)) return true;
    }
    return false;
}
/** Sets the match-phase settings of this profile; they are validated before being assigned */
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
settings.checkValid();
this.matchPhaseSettings = settings;
}
/** Returns the match-phase settings of this, or of the nearest inherited profile having any, or null */
public MatchPhaseSettings getMatchPhaseSettings() {
MatchPhaseSettings settings = this.matchPhaseSettings;
if (settings != null) return settings;
if (getInherited() != null) return getInherited().getMatchPhaseSettings();
return null;
}
public void addRankSetting(RankSetting rankSetting) {
rankSettings.add(rankSetting);
}
public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
addRankSetting(new RankSetting(fieldName, type, value));
}
/**
 * Returns a rank setting of a field declared directly in this profile (inherited profiles
 * are not consulted), or null if there is no such rank setting in this profile.
 *
 * @param field the field whose settings to return.
 * @param type the type that the field is required to be.
 * @return the rank setting found, or null.
 */
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
    for (RankSetting setting : rankSettings) {
        if (setting.getFieldName().equals(field) && setting.getType().equals(type))
            return setting;
    }
    return null;
}
/**
 * Returns a rank setting of field or index, or null if there is no such rank setting in this profile or one it
 * inherits
 *
 * @param field the field whose settings to return
 * @param type the type that the field is required to be
 * @return the rank setting found, or null
 */
public RankSetting getRankSetting(String field, RankSetting.Type type) {
RankSetting rankSetting = getDeclaredRankSetting(field, type);
if (rankSetting != null) return rankSetting;
if (getInherited() != null) return getInherited().getRankSetting(field, type);
return null;
}
/**
 * Returns the rank settings declared directly in this rank profile (not inherited ones)
 *
 * @return an iterator for the declared rank setting
 */
public Iterator<RankSetting> declaredRankSettingIterator() {
return Collections.unmodifiableSet(rankSettings).iterator();
}
/**
 * Returns all settings in this profile or any profile it inherits
 *
 * @return an iterator for all rank settings of this
 */
public Iterator<RankSetting> rankSettingIterator() {
return rankSettings().iterator();
}
/**
 * Returns a snapshot of the rank settings of this and everything it inherits.
 * Changes to the returned set will not be reflected in this rank profile.
 */
public Set<RankSetting> rankSettings() {
    Set<RankSetting> combined = new LinkedHashSet<>(rankSettings);
    if (getInherited() != null)
        combined.addAll(getInherited().rankSettings());
    return combined;
}
/** Adds a constant for use in ranking expressions. Dense tensor constants must have sized dimensions. */
public void addConstant(String name, Value value) {
if (value instanceof TensorValue) {
TensorType type = value.type();
if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
": Dense tensor dimensions must have a size");
}
constants.put(name, value.freeze());
}
public void addConstantTensor(String name, TensorValue value) {
addConstant(name, value);
}
/** Returns an unmodifiable view of the constants available in this */
public Map<String, Value> getConstants() {
if (constants.isEmpty())
return getInherited() != null ? getInherited().getConstants() : Collections.emptyMap();
if (getInherited() == null || getInherited().getConstants().isEmpty())
return Collections.unmodifiableMap(constants);
// Both levels have constants: merge, with this profile's constants shadowing inherited ones
Map<String, Value> combinedConstants = new HashMap<>(getInherited().getConstants());
combinedConstants.putAll(constants);
return combinedConstants;
}
public void addAttributeType(String attributeName, String attributeType) {
attributeTypes.addType(attributeName, attributeType);
}
public Map<String, String> getAttributeTypes() {
return attributeTypes.getTypes();
}
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
queryFeatureTypes.addType(queryFeature, queryFeatureType);
}
public Map<String, String> getQueryFeatureTypes() {
return queryFeatureTypes.getTypes();
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getFirstPhaseRanking() {
RankingExpressionFunction function = getFirstPhase();
if (function == null) return null;
return function.function.getBody();
}
/** Returns the first-phase function of this or the nearest inherited profile having one, or null */
public RankingExpressionFunction getFirstPhase() {
if (firstPhaseRanking != null) return firstPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getFirstPhase();
return null;
}
void setFirstPhaseRanking(RankingExpression rankingExpression) {
this.firstPhaseRanking = new RankingExpressionFunction(new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression), false);
}
/** Parses and sets the first-phase ranking expression; throws IllegalArgumentException if it cannot be parsed */
public void setFirstPhaseRanking(String expression) {
try {
firstPhaseRanking = new RankingExpressionFunction(parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression), false);
} catch (ParseException e) {
throw new IllegalArgumentException("Illegal first phase ranking function", e);
}
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getSecondPhaseRanking() {
RankingExpressionFunction function = getSecondPhase();
if (function == null) return null;
return function.function().getBody();
}
/** Returns the second-phase function of this or the nearest inherited profile having one, or null */
public RankingExpressionFunction getSecondPhase() {
if (secondPhaseRanking != null) return secondPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getSecondPhase();
return null;
}
/** Parses and sets the second-phase ranking expression; throws IllegalArgumentException if it cannot be parsed */
public void setSecondPhaseRanking(String expression) {
try {
secondPhaseRanking = new RankingExpressionFunction(parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression), false);
}
catch (ParseException e) {
throw new IllegalArgumentException("Illegal second phase ranking function", e);
}
}
/**
 * Returns a read-only view of the summary features to use in this profile. This is never null.
 * If summary features are inherited explicitly (setInheritedSummaryFeatures) and also declared
 * here, the result is the union of the parent's and this profile's features.
 */
public Set<ReferenceNode> getSummaryFeatures() {
    if (inheritedSummaryFeatures != null && summaryFeatures != null) {
        // Use LinkedHashSet (not HashSet) so the combined set keeps a deterministic,
        // insertion-ordered iteration order, consistent with how summaryFeatures itself is built
        Set<ReferenceNode> combined = new LinkedHashSet<>();
        combined.addAll(getInherited().getSummaryFeatures());
        combined.addAll(summaryFeatures);
        return Collections.unmodifiableSet(combined);
    }
    if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
    if (getInherited() != null) return getInherited().getSummaryFeatures();
    return Set.of();
}
private void addSummaryFeature(ReferenceNode feature) {
// Lazily created: a null set means "inherit from the parent" in getSummaryFeatures
if (summaryFeatures == null)
summaryFeatures = new LinkedHashSet<>();
summaryFeatures.add(feature);
}
/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addSummaryFeature(feature);
}
}
/**
 * Sets the name this should inherit the summary features of.
 * Without setting this, this will either have the summary features of the parent,
 * or if summary features are set in this, only have the summary features in this.
 * With this set the resulting summary features of this will be the superset of those defined in this and
 * the final (with inheritance included) summary features of the given parent.
 * The profile must be the profile which is directly inherited by this.
 *
 * @param parentProfile the name of the profile directly inherited by this
 * @throws IllegalArgumentException if the given profile is not the direct parent of this
 */
public void setInheritedSummaryFeatures(String parentProfile) {
    if ( ! parentProfile.equals(inheritedName))
        // Message fixed: balanced quotes and "attempting" (was "attemtping")
        throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
                                           inheritedName + "', but attempting to inherit '" + parentProfile + "'");
    this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
if (getInherited() != null) return getInherited().getRankFeatures();
return Collections.emptySet();
}
private void addRankFeature(ReferenceNode feature) {
// Lazily created: a null set means "inherit from the parent" in getRankFeatures
if (rankFeatures == null)
rankFeatures = new LinkedHashSet<>();
rankFeatures.add(feature);
}
/**
 * Adds the content of the given feature list to the internal list of rank features.
 *
 * @param features The features to add.
 */
public void addRankFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addRankFeature(feature);
}
}
/** Returns a read only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
List<RankProperty> properties = new ArrayList<>();
for (List<RankProperty> propertyList : getRankPropertyMap().values()) {
properties.addAll(propertyList);
}
return Collections.unmodifiableList(properties);
}
/** Returns a read only map view of the rank properties to use in this profile. This is never null. */
public Map<String, List<RankProperty>> getRankPropertyMap() {
if (rankProperties.size() == 0 && getInherited() == null) return Collections.emptyMap();
if (rankProperties.size() == 0) return getInherited().getRankPropertyMap();
if (getInherited() == null) return Collections.unmodifiableMap(rankProperties);
// Both levels have properties: merge, with this profile's entries shadowing inherited ones per name
Map<String, List<RankProperty>> combined = new LinkedHashMap<>(getInherited().getRankPropertyMap());
combined.putAll(rankProperties);
return Collections.unmodifiableMap(combined);
}
public void addRankProperty(String name, String parameter) {
addRankProperty(new RankProperty(name, parameter));
}
private void addRankProperty(RankProperty rankProperty) {
// A multimap: several properties may be added under the same name
rankProperties.computeIfAbsent(rankProperty.getName(), (String key) -> new ArrayList<>(1)).add(rankProperty);
}
@Override
public String toString() {
return "rank profile '" + getName() + "'";
}
/** Returns the second-phase rerank count, possibly inherited; -1 means use the default */
public int getRerankCount() {
return (rerankCount < 0 && (getInherited() != null))
? getInherited().getRerankCount()
: rerankCount;
}
// For the accessors below, a negative (or null) stored value means "not set here":
// the value is then taken from the inherited profile when one exists.
public int getNumThreadsPerSearch() {
return (numThreadsPerSearch < 0 && (getInherited() != null))
? getInherited().getNumThreadsPerSearch()
: numThreadsPerSearch;
}
public void setNumThreadsPerSearch(int numThreads) {
this.numThreadsPerSearch = numThreads;
}
public int getMinHitsPerThread() {
return (minHitsPerThread < 0 && (getInherited() != null))
? getInherited().getMinHitsPerThread()
: minHitsPerThread;
}
public void setMinHitsPerThread(int minHits) {
this.minHitsPerThread = minHits;
}
public void setNumSearchPartitions(int numSearchPartitions) {
this.numSearchPartitions = numSearchPartitions;
}
public int getNumSearchPartitions() {
return (numSearchPartitions < 0 && (getInherited() != null))
? getInherited().getNumSearchPartitions()
: numSearchPartitions;
}
/** Returns the termwise limit of this or an inherited profile, or empty if not set anywhere */
public OptionalDouble getTermwiseLimit() {
return ((termwiseLimit == null) && (getInherited() != null))
? getInherited().getTermwiseLimit()
: (termwiseLimit != null) ? OptionalDouble.of(termwiseLimit) : OptionalDouble.empty();
}
public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }
/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
this.rerankCount = rerankCount;
}
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}
public boolean getIgnoreDefaultRankFeatures() {
if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
return (getInherited() != null) && getInherited().getIgnoreDefaultRankFeatures();
}
/** Adds a function after parsing its expression; throws IllegalArgumentException on parse failure */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
try {
addFunction(parseRankingExpression(name, arguments, expression), inline);
}
catch (ParseException e) {
throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
}
}
/** Adds a function and returns it */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline);
functions.put(function.getName(), rankingExpressionFunction);
// Invalidate the merged-functions cache since the set of functions changed
allFunctionsCached = null;
return rankingExpressionFunction;
}
/**
 * Use for rank profiles representing a model evaluation; it will assume
 * that a input is provided with the declared type (for the purpose of
 * type resolving).
 **/
public void addInputFeature(String name, TensorType declaredType) {
Reference ref = Reference.fromIdentifier(name);
if (inputFeatures.containsKey(ref)) {
TensorType hadType = inputFeatures.get(ref);
// Redeclaring with the same type is a no-op; a conflicting type is an error
if (! declaredType.equals(hadType)) {
throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
hadType+" -> "+declaredType);
}
}
inputFeatures.put(ref, declaredType);
}
/** Returns the function with the given name in this or an inherited profile, or null if none */
public RankingExpressionFunction findFunction(String name) {
RankingExpressionFunction function = functions.get(name);
return ((function == null) && (getInherited() != null))
? getInherited().findFunction(name)
: function;
}
/** Returns an unmodifiable snapshot of the functions in this */
public Map<String, RankingExpressionFunction> getFunctions() {
if (needToUpdateFunctionCache()) {
allFunctionsCached = gatherAllFunctions();
}
return allFunctionsCached;
}
private Map<String, RankingExpressionFunction> gatherAllFunctions() {
if (functions.isEmpty() && getInherited() == null) return Collections.emptyMap();
if (functions.isEmpty()) return getInherited().getFunctions();
if (getInherited() == null) return Collections.unmodifiableMap(new LinkedHashMap<>(functions));
// Both levels have functions: merge, with this profile's functions shadowing inherited ones
Map<String, RankingExpressionFunction> allFunctions = new LinkedHashMap<>(getInherited().getFunctions());
allFunctions.putAll(functions);
return Collections.unmodifiableMap(allFunctions);
}
// The cache is stale if it was never built, or if any profile up the inheritance chain has a stale cache
private boolean needToUpdateFunctionCache() {
if (getInherited() != null)
return (allFunctionsCached == null) || getInherited().needToUpdateFunctionCache();
return allFunctionsCached == null;
}
/** Returns the keep-rank count of this or an inherited profile, or -1 if not set anywhere */
public int getKeepRankCount() {
if (keepRankCount >= 0) return keepRankCount;
if (getInherited() != null) return getInherited().getKeepRankCount();
return -1;
}
public void setKeepRankCount(int rerankArraySize) {
this.keepRankCount = rerankArraySize;
}
/** Returns the rank score drop limit of this or an inherited profile, or -Double.MAX_VALUE if unset */
public double getRankScoreDropLimit() {
// '>-' reads as '> -': -Double.MAX_VALUE is the "not set" sentinel
if (rankScoreDropLimit >- Double.MAX_VALUE) return rankScoreDropLimit;
if (getInherited() != null) return getInherited().getRankScoreDropLimit();
return rankScoreDropLimit;
}
public void setRankScoreDropLimit(double rankScoreDropLimit) {
this.rankScoreDropLimit = rankScoreDropLimit;
}
/** Returns the (mutable) set of filter fields declared directly in this profile, not including inherited ones */
public Set<String> filterFields() {
return filterFields;
}
/**
 * Returns all filter fields in this profile and any profile it inherits.
 *
 * @return the set of all filter fields
 */
public Set<String> allFilterFields() {
    Set<String> combined = new LinkedHashSet<>();
    RankProfile parent = getInherited();
    if (parent != null)
        combined.addAll(parent.allFilterFields());
    combined.addAll(filterFields());
    return combined;
}
/**
 * Parses the given expression string into a function with the given name and arguments.
 * "file:" expressions are resolved through openRankingExpressionReader.
 *
 * @throws ParseException if the expression is empty or cannot be parsed
 */
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
    String trimmed = expression.trim(); // hoisted: previously trimmed up to three times
    if (trimmed.isEmpty())
        throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");
    try (Reader rankingExpressionReader = openRankingExpressionReader(name, trimmed)) {
        return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
    }
    catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
        ParseException exception = new ParseException("Could not parse ranking expression '" + trimmed +
                                                      "' in " + getName()+ ", " + name + ".");
        throw (ParseException)exception.initCause(e);
    }
    catch (IOException e) {
        // Fix: keep the IOException as the cause; it was previously dropped, hiding the real failure
        throw new RuntimeException("IOException parsing ranking expression '" + name + "'", e);
    }
}
/** Extracts the file name from a "file:" expression, appending the rank-expression suffix when missing. */
private static String extractFileName(String expression) {
    String fileName = expression.substring("file:".length()).trim();
    return fileName.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX)
           ? fileName
           : fileName + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
}
/**
 * Returns a reader over the given expression. A "file:" expression is resolved to the referenced
 * rank expression file in the owning search; files in subdirectories are not supported.
 */
private Reader openRankingExpressionReader(String expName, String expression) {
if (!expression.startsWith("file:")) return new StringReader(expression);
String fileName = extractFileName(expression);
File file = new File(fileName);
if (!file.isAbsolute() && file.getPath().contains("/"))
throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
"' in subdirectory, which is not supported.");
return search.getRankingExpression(fileName);
}
/** Shallow clones this */
@Override
public RankProfile clone() {
try {
RankProfile clone = (RankProfile)super.clone();
// Copy the mutable containers so the clone can be modified independently;
// the contained elements themselves are shared with this
clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
clone.matchPhaseSettings = this.matchPhaseSettings;
clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
clone.functions = new LinkedHashMap<>(this.functions);
clone.allFunctionsCached = null;
clone.filterFields = new HashSet<>(this.filterFields);
clone.constants = new HashMap<>(this.constants);
return clone;
}
catch (CloneNotSupportedException e) {
throw new RuntimeException("Won't happen", e);
}
}
/**
 * Returns a copy of this where the content is optimized for execution.
 * Compiled profiles should never be modified.
 */
public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
try {
RankProfile compiled = this.clone();
compiled.compileThis(queryProfiles, importedModels);
return compiled;
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
}
}
// Note: the order below matters — inline functions are compiled first so that
// the phase expressions and remaining functions can use them
private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
checkNameCollisions(getFunctions(), getConstants());
ExpressionTransforms expressionTransforms = new ExpressionTransforms();
Map<Reference, TensorType> featureTypes = collectFeatureTypes();
Map<String, RankingExpressionFunction> inlineFunctions =
compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);
firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
allFunctionsCached = null;
}
/** Throws IllegalArgumentException if any function shares its name with a constant. */
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
    for (String functionName : functions.keySet()) {
        if (constants.containsKey(functionName))
            throw new IllegalArgumentException("Cannot have both a constant and function named '" +
                                               functionName + "'");
    }
}
/**
 * Returns the subset of this profile's functions which are marked inline, keyed by name.
 * Collected into a LinkedHashMap to preserve the declaration order of the functions,
 * consistent with the other function maps in this class; the plain Collectors.toMap
 * previously used produces an unordered HashMap, making downstream compilation order
 * non-deterministic.
 */
private Map<String, RankingExpressionFunction> getInlineFunctions() {
    return getFunctions().entrySet().stream()
                         .filter(entry -> entry.getValue().inline())
                         .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                                                   (a, b) -> a, LinkedHashMap::new));
}
/**
 * Compiles the functions returned by the given supplier, one at a time.
 * The supplier is re-queried on every iteration rather than iterating a snapshot,
 * presumably because compiling one function may change the underlying function
 * set — TODO confirm; direct iteration could then fail or miss entries.
 *
 * @return the compiled functions keyed by name, in compilation order
 */
private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
                                                                QueryProfileRegistry queryProfiles,
                                                                Map<Reference, TensorType> featureTypes,
                                                                ImportedMlModels importedModels,
                                                                Map<String, RankingExpressionFunction> inlineFunctions,
                                                                ExpressionTransforms expressionTransforms) {
    Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
    Map.Entry<String, RankingExpressionFunction> entry;
    // Repeatedly pick the next not-yet-compiled function until none remain
    while (null != (entry = findUncompiledFunction(functions.get(), compiledFunctions.keySet()))) {
        RankingExpressionFunction rankingExpressionFunction = entry.getValue();
        RankingExpressionFunction compiled = compile(rankingExpressionFunction, queryProfiles, featureTypes,
                                                     importedModels, getConstants(), inlineFunctions, expressionTransforms);
        compiledFunctions.put(entry.getKey(), compiled);
    }
    return compiledFunctions;
}
/** Returns the first function (in iteration order) whose name is not yet compiled, or null if all are. */
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
                                                                                   Set<String> compiledFunctionNames) {
    return functions.entrySet().stream()
                    .filter(entry -> ! compiledFunctionNames.contains(entry.getKey()))
                    .findFirst()
                    .orElse(null);
}
/**
 * Compiles a single function (or phase expression) by running all expression transforms on
 * its body. Returns null when given null, so absent phase expressions pass through.
 * Side effect: any rank properties produced by the transforms are added to this profile.
 */
private RankingExpressionFunction compile(RankingExpressionFunction function,
                                          QueryProfileRegistry queryProfiles,
                                          Map<Reference, TensorType> featureTypes,
                                          ImportedMlModels importedModels,
                                          Map<String, Value> constants,
                                          Map<String, RankingExpressionFunction> inlineFunctions,
                                          ExpressionTransforms expressionTransforms) {
    if (function == null) return null;
    RankProfileTransformContext context = new RankProfileTransformContext(this,
                                                                          queryProfiles,
                                                                          featureTypes,
                                                                          importedModels,
                                                                          constants,
                                                                          inlineFunctions);
    RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
    // Transforms may emit rank properties (e.g. for constants/models); record them on this profile
    for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
        addRankProperty(rankProperty.getKey(), rankProperty.getValue());
    }
    return function.withExpression(expression);
}
/**
 * Creates a context containing the type information of all constants, attributes and query profiles
 * referable from this rank profile.
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
    Map<Reference, TensorType> featureTypes = collectFeatureTypes();
    return typeContext(queryProfiles, featureTypes);
}
/** Creates a type context from an empty query profile registry. */
public MapEvaluationTypeContext typeContext() {
    return typeContext(new QueryProfileRegistry());
}
/**
 * Collects the types of all features known to this profile: the declared input features,
 * plus attribute features of all fields and imported fields.
 *
 * @return a mutable map from feature reference to tensor type
 */
private Map<Reference, TensorType> collectFeatureTypes() {
    // Copy-construct instead of element-by-element forEach/put
    Map<Reference, TensorType> featureTypes = new HashMap<>(inputFeatures);
    allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    return featureTypes;
}
/**
 * Creates a type context seeded with this profile's functions and the given feature types,
 * then registers the types of constants, query features declared in query profiles, and
 * ONNX model outputs.
 *
 * @throws IllegalArgumentException if two query profiles declare incompatible types for the same query feature
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
    MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
                                                                                  .map(RankingExpressionFunction::function)
                                                                                  .collect(Collectors.toList()),
                                                                    featureTypes);
    // Register both profile-local constants and the owner's ranking constants
    getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
    rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));
    // Add query feature types from all query profile types; only fields of the form query(...) apply
    for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
        for (FieldDescription field : queryProfileType.declaredFields().values()) {
            TensorType type = field.getType().asTensorType();
            Optional<Reference> feature = Reference.simple(field.getName());
            if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;
            TensorType existingType = context.getType(feature.get());
            // If this feature was already explicitly typed (differs from the default),
            // the two declarations must generalize to a common type
            if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
                type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
                    new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
                                                 " with type " + field.getType().asTensorType() +
                                                 ", but this is already defined in another query profile with type " +
                                                 context.getType(feature.get())));
            context.setType(feature.get(), type);
        }
    }
    // Register output types for every ONNX model: the default output plus each mapped output
    for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
        String modelName = entry.getKey();
        OnnxModel model = entry.getValue();
        Arguments args = new Arguments(new ReferenceNode(modelName));
        Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);
        TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
        context.setType(new Reference("onnxModel", args, null), defaultOutputType);
        for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
            TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
            context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
        }
    }
    return context;
}
/** Resolves the tensor type of each ONNX model input which can be resolved; unresolvable inputs are omitted. */
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
    Map<String, TensorType> resolvedTypes = new HashMap<>();
    for (String inputName : model.getInputMap().keySet())
        resolveOnnxInputType(inputName, model, context).ifPresent(type -> resolvedTypes.put(inputName, type));
    return resolvedTypes;
}
/**
 * Resolves the type of a single ONNX model input. The input's source may be a simple feature
 * reference (resolved directly from the context), a rankingExpression(name) wrapper (unwrapped
 * to the function name), or a plain function name (resolved via the function's body type).
 * Returns empty if the input has no source mapping or the source cannot be resolved.
 */
private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
    String source = model.getInputMap().get(onnxInputName);
    if (source != null) {
        Optional<Reference> reference = Reference.simple(source);
        if (reference.isPresent()) {
            if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
                // rankingExpression(name): fall through and look it up as a function below
                source = reference.get().simpleArgument().get();
            } else {
                // Any other feature reference: ask the context directly
                return Optional.of(context.getType(reference.get()));
            }
        }
        ExpressionFunction func = context.getFunction(source);
        if (func != null) {
            return Optional.of(func.getBody().type(context));
        }
    }
    return Optional.empty();
}
/** Adds an attribute(name) feature type for each attribute of the given field. */
private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
    Attribute fieldAttribute = field.getAttribute();
    for (Map.Entry<String, Attribute> entry : field.getAttributes().entrySet()) {
        Attribute candidate = entry.getValue();
        // The field's own attribute is exposed under the field name rather than the attribute key
        String name = (candidate == fieldAttribute) ? field.getName() : entry.getKey();
        featureTypes.put(FeatureNames.asAttributeFeature(name),
                         candidate.tensorType().orElse(TensorType.empty));
    }
}
/**
 * A rank setting. The identity of a rank setting is its field name and type (not value):
 * equals and hashCode deliberately exclude the value.
 * A rank setting is immutable.
 */
public static class RankSetting implements Serializable {

    private final String fieldName;

    private final Type type;

    /** The rank value */
    private final Object value;

    /** The kinds of rank settings that can be declared */
    public enum Type {

        RANKTYPE("rank-type"),
        LITERALBOOST("literal-boost"),
        WEIGHT("weight"),
        PREFERBITVECTOR("preferbitvector",true);

        /** The external (config) name of this setting type */
        private final String name;

        /** True if this setting really pertains to an index, not a field within an index */
        private final boolean isIndexLevel;

        Type(String name) {
            this(name,false);
        }

        Type(String name,boolean isIndexLevel) {
            this.name = name;
            this.isIndexLevel=isIndexLevel;
        }

        /** True if this setting really pertains to an index, not a field within an index */
        public boolean isIndexLevel() { return isIndexLevel; }

        /** Returns the name of this type */
        public String getName() {
            return name;
        }

        public String toString() {
            return "type: " + name;
        }

    }

    public RankSetting(String fieldName, RankSetting.Type type, Object value) {
        this.fieldName = fieldName;
        this.type = type;
        this.value = value;
    }

    public String getFieldName() { return fieldName; }

    public Type getType() { return type; }

    public Object getValue() { return value; }

    /** Returns the value as an int, or a negative value if it is not an integer */
    public int getIntValue() {
        if (value instanceof Integer) {
            return ((Integer)value);
        }
        else {
            return -1;
        }
    }

    @Override
    public int hashCode() {
        // Consistent with equals below: the value is intentionally excluded
        return fieldName.hashCode() + 17 * type.hashCode();
    }

    @Override
    public boolean equals(Object object) {
        if (!(object instanceof RankSetting)) {
            return false;
        }
        RankSetting other = (RankSetting)object;
        // Identity is field name + type only, per the class contract
        return
            fieldName.equals(other.fieldName) &&
            type.equals(other.type);
    }

    @Override
    public String toString() {
        return type + " setting " + fieldName + ": " + value;
    }

}
/** A rank property. Rank properties are value objects: equal when both name and value are equal. */
public static class RankProperty implements Serializable {

    private final String name;
    private final String value;

    public RankProperty(String name, String value) {
        this.name = name;
        this.value = value;
    }

    public String getName() { return name; }

    public String getValue() { return value; }

    @Override
    public int hashCode() {
        // Keep this formula stable: both fields participate, matching equals
        return name.hashCode() + 17 * value.hashCode();
    }

    @Override
    public boolean equals(Object object) {
        if (object == this) return true;
        if ( ! (object instanceof RankProperty)) return false;
        RankProperty other = (RankProperty)object;
        return name.equals(other.name) && value.equals(other.value);
    }

    @Override
    public String toString() {
        return name + " = " + value;
    }

}
/** A function in a rank profile */
public static class RankingExpressionFunction {

    /** The wrapped function; replaced (not mutated) when the return type or body changes */
    private ExpressionFunction function;

    /** True if this should be inlined into calling expressions. Useful for very cheap functions. */
    private final boolean inline;

    RankingExpressionFunction(ExpressionFunction function, boolean inline) {
        this.function = function;
        this.inline = inline;
    }

    public void setReturnType(TensorType type) {
        this.function = function.withReturnType(type);
    }

    public ExpressionFunction function() { return function; }

    /** Returns whether to inline: only functions without arguments are ever inlined */
    public boolean inline() {
        return inline && function.arguments().isEmpty();
    }

    /** Returns a copy of this holding the given (transformed) expression as body */
    RankingExpressionFunction withExpression(RankingExpression expression) {
        return new RankingExpressionFunction(function.withBody(expression), inline);
    }

    @Override
    public String toString() {
        return "function " + function;
    }

}
/** Settings for result diversity within the match phase. Validated by {@link #checkValid()}. */
public static final class DiversitySettings {

    /** The attribute to diversify over; required */
    private String attribute = null;
    /** Minimum number of distinct groups; must be positive */
    private int minGroups = 0;
    private double cutoffFactor = 10;
    private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;

    public void setAttribute(String value) { attribute = value; }
    public void setMinGroups(int value) { minGroups = value; }
    public void setCutoffFactor(double value) { cutoffFactor = value; }
    public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { cutoffStrategy = strategy; }
    public String getAttribute() { return attribute; }
    public int getMinGroups() { return minGroups; }
    public double getCutoffFactor() { return cutoffFactor; }
    public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }

    /** Throws IllegalArgumentException unless attribute is set, minGroups &gt; 0 and cutoffFactor &gt;= 1 */
    void checkValid() {
        if (attribute == null || attribute.isEmpty()) {
            throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
        }
        if (minGroups <= 0) {
            throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
        }
        if (cutoffFactor < 1.0) {
            throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
        }
    }
}
/** Settings for the match phase of a query. Validated by {@link #checkValid()}. */
public static class MatchPhaseSettings {

    /** The attribute to limit matching by; required */
    private String attribute = null;
    private boolean ascending = false;
    /** Max hits to produce in the match phase; must be positive */
    private int maxHits = 0;
    private double maxFilterCoverage = 0.2;
    /** Optional diversity settings; validated when set */
    private DiversitySettings diversity = null;
    private double evaluationPoint = 0.20;
    private double prePostFilterTippingPoint = 1.0;

    public void setDiversity(DiversitySettings value) {
        value.checkValid(); // fail fast on invalid diversity settings
        diversity = value;
    }

    public void setAscending(boolean value) { ascending = value; }
    public void setAttribute(String value) { attribute = value; }
    public void setMaxHits(int value) { maxHits = value; }
    public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
    public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
    public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }

    public boolean getAscending() { return ascending; }
    public String getAttribute() { return attribute; }
    public int getMaxHits() { return maxHits; }
    public double getMaxFilterCoverage() { return maxFilterCoverage; }
    public DiversitySettings getDiversity() { return diversity; }
    public double getEvaluationPoint() { return evaluationPoint; }
    public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }

    /** Throws IllegalArgumentException unless attribute is set and maxHits is positive */
    public void checkValid() {
        if (attribute == null) {
            throw new IllegalArgumentException("match-phase did not set any attribute");
        }
        if (! (maxHits > 0)) {
            throw new IllegalArgumentException("match-phase did not set max-hits > 0");
        }
    }

}
/** A mutable name-to-type mapping used for attribute and query feature type overrides. */
public static class TypeSettings {

    private final Map<String, String> types = new HashMap<>();

    void addType(String name, String type) {
        types.put(name, type);
    }

    /** Returns a read-only view of the registered types, keyed by name */
    public Map<String, String> getTypes() {
        return Collections.unmodifiableMap(types);
    }

}
} | class RankProfile implements Cloneable {
/** Reserved function name under which the first phase expression is stored */
public final static String FIRST_PHASE = "firstphase";
/** Reserved function name under which the second phase expression is stored */
public final static String SECOND_PHASE = "secondphase";

/** The search definition-unique name of this rank profile */
private final String name;

/** The search definition owning this profile, or null if global (owned by a model) */
private final ImmutableSearch search;

/** The model owning this profile if it is global, or null if it is owned by a search definition */
private final VespaModel model;

/** The name of the rank profile inherited by this */
private String inheritedName = null;

/** The resolved inherited profile — presumably cached lazily by getInherited(); not set in this view */
private RankProfile inherited = null;

/** The match settings of this profile */
private MatchPhaseSettings matchPhaseSettings = null;

/** The rank settings of this profile */
protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();

/** The ranking expression to be used for first phase */
private RankingExpressionFunction firstPhaseRanking = null;

/** The ranking expression to be used for second phase */
private RankingExpressionFunction secondPhaseRanking = null;

/** Number of hits to be reranked in second phase, -1 means use default */
private int rerankCount = -1;

/** Mysterious attribute */
private int keepRankCount = -1;

/** Negative values of these mean "inherit or use default" */
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
private Double termwiseLimit = null;

/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;

/** Summary features of this profile; null until the first feature is added */
private Set<ReferenceNode> summaryFeatures;
/** Name of the parent whose summary features are additionally inherited, or null */
private String inheritedSummaryFeatures;
/** Rank features of this profile; null until the first feature is added */
private Set<ReferenceNode> rankFeatures;

/** The properties of this - a multimap */
private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();

/** Tri-state: null means inherit */
private Boolean ignoreDefaultRankFeatures = null;

/** Functions declared directly on this profile */
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();
/** Cache of this profile's plus inherited functions; null when invalid */
private Map<String, RankingExpressionFunction> allFunctionsCached = null;

/** Declared model-evaluation input features with their types */
private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();

private Set<String> filterFields = new HashSet<>();

private final RankProfileRegistry rankProfileRegistry;

/** Constants in ranking expressions */
private Map<String, Value> constants = new HashMap<>();

private final TypeSettings attributeTypes = new TypeSettings();

private final TypeSettings queryFeatureTypes = new TypeSettings();

/** Lazily cached list of all fields of the owning search; see allFields() */
private List<ImmutableSDField> allFieldsList;

/** Global onnx models not tied to a search definition */
private final OnnxModels onnxModels;

private final DeployLogger deployLogger;
/**
 * Creates a new rank profile for a particular search definition
 *
 * @param name the name of the new profile
 * @param search the search definition owning this profile
 * @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} to use for storing
 *        and looking up rank profiles.
 */
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
    this.name = Objects.requireNonNull(name, "name cannot be null");
    this.search = Objects.requireNonNull(search, "search cannot be null");
    this.model = null;       // owned by a search definition, not a model
    this.onnxModels = null;  // ONNX models are reached through the search definition
    this.rankProfileRegistry = rankProfileRegistry;
    this.deployLogger = search.getDeployLogger();
}

/**
 * Creates a global rank profile
 *
 * @param name the name of the new profile
 * @param model the model owning this profile
 * @param deployLogger the logger to report deploy-time messages to
 * @param rankProfileRegistry the registry to use for storing and looking up rank profiles
 * @param onnxModels the global ONNX models available to this profile
 */
public RankProfile(String name, VespaModel model, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
    this.name = Objects.requireNonNull(name, "name cannot be null");
    this.search = null;  // global profiles have no owning search definition
    this.model = Objects.requireNonNull(model, "model cannot be null");
    this.rankProfileRegistry = rankProfileRegistry;
    this.onnxModels = onnxModels;
    this.deployLogger = deployLogger;
}
/** Returns the search definition-unique name of this profile */
public String getName() { return name; }

/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }

/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
    return search != null ? search.applicationPackage() : model.applicationPackage();
}

/** Returns the ranking constants of the owner of this */
public RankingConstants rankingConstants() {
    return search != null ? search.rankingConstants() : model.rankingConstants();
}

/** Returns the ONNX models of the owner of this (the search definition, or the model when global) */
public Map<String, OnnxModel> onnxModels() {
    return search != null ? search.onnxModels().asMap() : onnxModels.asMap();
}

/** Returns all fields of the owning search, caching the list on first access; empty when global */
private Stream<ImmutableSDField> allFields() {
    if (search == null) return Stream.empty();
    if (allFieldsList == null) {
        allFieldsList = search.allFieldsList();
    }
    return allFieldsList.stream();
}

/** Returns all imported fields of the owning search; empty when global */
private Stream<ImmutableSDField> allImportedFields() {
    return search != null ? search.allImportedFields() : Stream.empty();
}
/**
 * Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
 * definition
 */
public void setInherited(String inheritedName) {
    this.inheritedName = inheritedName;
}

/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }

/**
 * Returns a profile with the inherited name by scanning every registered profile,
 * ignoring document-type/search scoping, or null if none matches.
 * Note: linear in the number of registered profiles.
 */
private RankProfile resolveIndependentOfInheritance() {
    for (RankProfile rankProfile : rankProfileRegistry.all()) {
        if (rankProfile.getName().equals(inheritedName)) return rankProfile;
    }
    return null;
}

/** Returns "searchName.profileName", or just the profile name for a global profile */
private String createFullyQualifiedName() {
    return (search != null)
           ? (search.getName() + "." + getName())
           : getName();
}

/**
 * Walks up the inheritance chain from the given parent, failing if the chain revisits the
 * starting profile. Assumes 'children' is seeded with the starting profile's fully qualified
 * name by the caller — TODO confirm; with an empty list this would throw on the first call.
 */
private void verifyNoInheritanceCycle(List<String> children, RankProfile parent) {
    children.add(parent.createFullyQualifiedName());
    String root = children.get(0);
    if (root.equals(parent.createFullyQualifiedName())) {
        throw new IllegalArgumentException("There is a cycle in the inheritance for rank-profile '" + root + "' = " + children);
    }
    if (parent.getInherited() != null) {
        verifyNoInheritanceCycle(children, parent.getInherited());
    }
}

/**
 * Resolves the inherited profile within the given search. When a profile inherits its own
 * name, the parent is looked up in the document type's inherited types first, so a profile
 * can shadow and extend a same-named profile from a base type.
 */
private RankProfile resolveInherited(ImmutableSearch search) {
    SDDocumentType documentType = search.getDocument();
    if (documentType != null) {
        if (name.equals(inheritedName)) {
            // Inheriting our own name: resolve from a base document type, not ourselves
            for (SDDocumentType baseType : documentType.getInheritedTypes()) {
                RankProfile resolvedFromBase = rankProfileRegistry.resolve(baseType, inheritedName);
                if (resolvedFromBase != null) return resolvedFromBase;
            }
        }
        return rankProfileRegistry.resolve(documentType, inheritedName);
    }
    return rankProfileRegistry.get(search.getName(), inheritedName);
}

/** Resolves the inherited profile: search-scoped when owned by a search, otherwise global */
private RankProfile resolveInherited() {
    if (inheritedName == null) return null;
    return (getSearch() != null)
           ? resolveInherited(search)
           : rankProfileRegistry.getGlobal(inheritedName);
}
/**
 * Returns whether this profile inherits (directly or indirectly) the given profile.
 *
 * @param name the profile name to compare this to
 * @return whether this inherits from the named profile
 */
public boolean inherits(String name) {
    for (RankProfile ancestor = getInherited(); ancestor != null; ancestor = ancestor.getInherited()) {
        if (ancestor.getName().equals(name))
            return true;
    }
    return false;
}
/** Sets the match phase settings of this profile, validating them first */
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
    settings.checkValid(); // fail fast on invalid settings
    this.matchPhaseSettings = settings;
}

/** Returns the match phase settings of this, or of the nearest ancestor declaring them, or null */
public MatchPhaseSettings getMatchPhaseSettings() {
    MatchPhaseSettings settings = this.matchPhaseSettings;
    if (settings != null) return settings;
    if (getInherited() != null) return getInherited().getMatchPhaseSettings();
    return null;
}

/** Adds a rank setting to this profile */
public void addRankSetting(RankSetting rankSetting) {
    rankSettings.add(rankSetting);
}

/** Adds a rank setting constructed from the given field name, type and value */
public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
    addRankSetting(new RankSetting(fieldName, type, value));
}
/**
 * Returns the a rank setting of a field, or null if there is no such rank setting in this profile
 *
 * @param field the field whose settings to return.
 * @param type the type that the field is required to be.
 * @return the rank setting found, or null.
 */
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
    Iterator<RankSetting> settings = declaredRankSettingIterator();
    while (settings.hasNext()) {
        RankSetting setting = settings.next();
        if (setting.getFieldName().equals(field) && setting.getType().equals(type))
            return setting;
    }
    return null;
}
/**
 * Returns a rank setting of field or index, or null if there is no such rank setting in this profile or one it
 * inherits
 *
 * @param field the field whose settings to return
 * @param type the type that the field is required to be
 * @return the rank setting found, or null
 */
public RankSetting getRankSetting(String field, RankSetting.Type type) {
    RankSetting declared = getDeclaredRankSetting(field, type);
    if (declared != null) return declared;
    RankProfile parent = getInherited();
    return (parent != null) ? parent.getRankSetting(field, type) : null;
}
/**
 * Returns the rank settings declared directly in this rank profile, excluding inherited ones
 *
 * @return an iterator over an unmodifiable view of the declared rank settings
 */
public Iterator<RankSetting> declaredRankSettingIterator() {
    return Collections.unmodifiableSet(rankSettings).iterator();
}

/**
 * Returns all settings in this profile or any profile it inherits
 *
 * @return an iterator over a snapshot of all rank settings of this
 */
public Iterator<RankSetting> rankSettingIterator() {
    return rankSettings().iterator();
}
/**
 * Returns a snapshot of the rank settings of this and everything it inherits.
 * Changes to the returned set will not be reflected in this rank profile.
 * Locally declared settings come first, followed by inherited ones.
 */
public Set<RankSetting> rankSettings() {
    Set<RankSetting> combined = new LinkedHashSet<>(rankSettings);
    RankProfile parent = getInherited();
    if (parent != null)
        combined.addAll(parent.rankSettings());
    return combined;
}
/**
 * Adds a named constant to this profile. The value is frozen when added.
 *
 * @throws IllegalArgumentException if the value is a dense tensor with an unsized indexed dimension
 */
public void addConstant(String name, Value value) {
    if (value instanceof TensorValue) {
        TensorType type = value.type();
        // Dense (indexed) dimensions must have a declared size to be usable as constants
        if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
            throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
                                               ": Dense tensor dimensions must have a size");
    }
    constants.put(name, value.freeze());
}

/** Adds a named tensor constant; convenience overload of {@link #addConstant} */
public void addConstantTensor(String name, TensorValue value) {
    addConstant(name, value);
}

/**
 * Returns an unmodifiable view of the constants available in this.
 * Inherited constants are included; a local constant shadows an inherited one of the same name.
 */
public Map<String, Value> getConstants() {
    if (constants.isEmpty())
        return getInherited() != null ? getInherited().getConstants() : Collections.emptyMap();
    if (getInherited() == null || getInherited().getConstants().isEmpty())
        return Collections.unmodifiableMap(constants);
    // Merge: local constants override inherited ones
    Map<String, Value> combinedConstants = new HashMap<>(getInherited().getConstants());
    combinedConstants.putAll(constants);
    return combinedConstants;
}
/** Declares the type of an attribute, for type resolving in ranking expressions */
public void addAttributeType(String attributeName, String attributeType) {
    attributeTypes.addType(attributeName, attributeType);
}

/** Returns a read-only view of the declared attribute types, keyed by attribute name */
public Map<String, String> getAttributeTypes() {
    return attributeTypes.getTypes();
}

/** Declares the type of a query feature, for type resolving in ranking expressions */
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
    queryFeatureTypes.addType(queryFeature, queryFeatureType);
}

/** Returns a read-only view of the declared query feature types, keyed by feature name */
public Map<String, String> getQueryFeatureTypes() {
    return queryFeatureTypes.getTypes();
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getFirstPhaseRanking() {
    RankingExpressionFunction function = getFirstPhase();
    if (function == null) return null;
    return function.function.getBody();
}

/** Returns the first phase function of this, or of the nearest ancestor declaring one, or null */
public RankingExpressionFunction getFirstPhase() {
    if (firstPhaseRanking != null) return firstPhaseRanking;
    RankProfile inherited = getInherited();
    if (inherited != null) return inherited.getFirstPhase();
    return null;
}

/** Sets the first phase ranking from an already-parsed expression (package-private) */
void setFirstPhaseRanking(RankingExpression rankingExpression) {
    this.firstPhaseRanking = new RankingExpressionFunction(new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression), false);
}

/**
 * Sets the first phase ranking by parsing the given expression string.
 *
 * @throws IllegalArgumentException if the expression cannot be parsed
 */
public void setFirstPhaseRanking(String expression) {
    try {
        firstPhaseRanking = new RankingExpressionFunction(parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression), false);
    } catch (ParseException e) {
        throw new IllegalArgumentException("Illegal first phase ranking function", e);
    }
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getSecondPhaseRanking() {
    RankingExpressionFunction function = getSecondPhase();
    if (function == null) return null;
    return function.function().getBody();
}

/** Returns the second phase function of this, or of the nearest ancestor declaring one, or null */
public RankingExpressionFunction getSecondPhase() {
    if (secondPhaseRanking != null) return secondPhaseRanking;
    RankProfile inherited = getInherited();
    if (inherited != null) return inherited.getSecondPhase();
    return null;
}

/**
 * Sets the second phase ranking by parsing the given expression string.
 *
 * @throws IllegalArgumentException if the expression cannot be parsed
 */
public void setSecondPhaseRanking(String expression) {
    try {
        secondPhaseRanking = new RankingExpressionFunction(parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression), false);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Illegal second phase ranking function", e);
    }
}
/** Returns a read-only view of the summary features to use in this profile. This is never null */
public Set<ReferenceNode> getSummaryFeatures() {
    if (inheritedSummaryFeatures != null && summaryFeatures != null) {
        // LinkedHashSet keeps a deterministic order (inherited first, then local),
        // consistent with summaryFeatures itself being a LinkedHashSet; a plain
        // HashSet would scramble the deliberately preserved insertion order
        Set<ReferenceNode> combined = new LinkedHashSet<>();
        combined.addAll(getInherited().getSummaryFeatures());
        combined.addAll(summaryFeatures);
        return Collections.unmodifiableSet(combined);
    }
    if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
    if (getInherited() != null) return getInherited().getSummaryFeatures();
    return Set.of();
}
/** Adds a single summary feature, lazily creating the backing ordered set. */
private void addSummaryFeature(ReferenceNode feature) {
    if (summaryFeatures == null)
        summaryFeatures = new LinkedHashSet<>();
    summaryFeatures.add(feature);
}

/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
    features.forEach(this::addSummaryFeature);
}
/**
 * Sets the name this should inherit the summary features of.
 * Without setting this, this will either have the summary features of the parent,
 * or if summary features are set in this, only have the summary features in this.
 * With this set the resulting summary features of this will be the superset of those defined in this and
 * the final (with inheritance included) summary features of the given parent.
 * The profile must be the profile which is directly inherited by this.
 *
 * @throws IllegalArgumentException if the given profile is not the directly inherited one
 */
public void setInheritedSummaryFeatures(String parentProfile) {
    if ( ! parentProfile.equals(inheritedName))
        // Fixed message: typo ("attemtping") and unbalanced quotes around the names
        throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
                                           inheritedName + "', but attempting to inherit '" + parentProfile + "'");
    this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
    if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
    if (getInherited() != null) return getInherited().getRankFeatures();
    return Collections.emptySet();
}

/** Adds a single rank feature, lazily creating the backing ordered set */
private void addRankFeature(ReferenceNode feature) {
    if (rankFeatures == null)
        rankFeatures = new LinkedHashSet<>();
    rankFeatures.add(feature);
}

/**
 * Adds the content of the given feature list to the internal list of rank features.
 *
 * @param features The features to add.
 */
public void addRankFeatures(FeatureList features) {
    for (ReferenceNode feature : features) {
        addRankFeature(feature);
    }
}
/** Returns a read only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
    List<RankProperty> properties = new ArrayList<>();
    for (List<RankProperty> propertyList : getRankPropertyMap().values()) {
        properties.addAll(propertyList);
    }
    return Collections.unmodifiableList(properties);
}

/**
 * Returns a read only map view of the rank properties to use in this profile. This is never null.
 * When both this and an inherited profile define properties for the same name, this profile's
 * list replaces (does not merge with) the inherited list.
 */
public Map<String, List<RankProperty>> getRankPropertyMap() {
    if (rankProperties.size() == 0 && getInherited() == null) return Collections.emptyMap();
    if (rankProperties.size() == 0) return getInherited().getRankPropertyMap();
    if (getInherited() == null) return Collections.unmodifiableMap(rankProperties);
    // Local property lists override inherited lists wholesale
    Map<String, List<RankProperty>> combined = new LinkedHashMap<>(getInherited().getRankPropertyMap());
    combined.putAll(rankProperties);
    return Collections.unmodifiableMap(combined);
}

/** Adds a rank property to this; multiple properties may share the same name */
public void addRankProperty(String name, String parameter) {
    addRankProperty(new RankProperty(name, parameter));
}

private void addRankProperty(RankProperty rankProperty) {
    rankProperties.computeIfAbsent(rankProperty.getName(), (String key) -> new ArrayList<>(1)).add(rankProperty);
}
@Override
public String toString() {
    return "rank profile '" + getName() + "'";
}

/** Returns the rerank count of this or the nearest ancestor setting it; -1 means use the default */
public int getRerankCount() {
    return (rerankCount < 0 && (getInherited() != null))
            ? getInherited().getRerankCount()
            : rerankCount;
}

/** Returns the threads per search of this or the nearest ancestor setting it; -1 means use the default */
public int getNumThreadsPerSearch() {
    return (numThreadsPerSearch < 0 && (getInherited() != null))
            ? getInherited().getNumThreadsPerSearch()
            : numThreadsPerSearch;
}

public void setNumThreadsPerSearch(int numThreads) {
    this.numThreadsPerSearch = numThreads;
}

/** Returns the min hits per thread of this or the nearest ancestor setting it; -1 means use the default */
public int getMinHitsPerThread() {
    return (minHitsPerThread < 0 && (getInherited() != null))
            ? getInherited().getMinHitsPerThread()
            : minHitsPerThread;
}

public void setMinHitsPerThread(int minHits) {
    this.minHitsPerThread = minHits;
}

public void setNumSearchPartitions(int numSearchPartitions) {
    this.numSearchPartitions = numSearchPartitions;
}

/** Returns the search partition count of this or the nearest ancestor setting it; -1 means use the default */
public int getNumSearchPartitions() {
    return (numSearchPartitions < 0 && (getInherited() != null))
            ? getInherited().getNumSearchPartitions()
            : numSearchPartitions;
}

/** Returns the termwise limit of this or the nearest ancestor setting it; empty means use the default */
public OptionalDouble getTermwiseLimit() {
    return ((termwiseLimit == null) && (getInherited() != null))
            ? getInherited().getTermwiseLimit()
            : (termwiseLimit != null) ? OptionalDouble.of(termwiseLimit) : OptionalDouble.empty();
}

public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }

/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
    this.rerankCount = rerankCount;
}

/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
    this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}

/** Returns whether to ignore default rank features, consulting ancestors when unset here */
public boolean getIgnoreDefaultRankFeatures() {
    if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
    return (getInherited() != null) && getInherited().getIgnoreDefaultRankFeatures();
}
/**
 * Adds a function by parsing the given expression string.
 *
 * @throws IllegalArgumentException if the expression cannot be parsed
 */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
    try {
        addFunction(parseRankingExpression(name, arguments, expression), inline);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
    }
}

/** Adds a function and returns it. Invalidates the all-functions cache. */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
    RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline);
    functions.put(function.getName(), rankingExpressionFunction);
    allFunctionsCached = null; // the set of functions changed
    return rankingExpressionFunction;
}
/**
 * Use for rank profiles representing a model evaluation; declares that an input with the
 * given name and type will be provided, for the purpose of type resolving.
 * Re-declaring an existing input with the same type is a no-op; re-declaring with a
 * different type is an error.
 */
public void addInputFeature(String name, TensorType declaredType) {
    Reference reference = Reference.fromIdentifier(name);
    if (inputFeatures.containsKey(reference)) {
        TensorType existingType = inputFeatures.get(reference);
        if ( ! declaredType.equals(existingType))
            throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
                                               existingType+" -> "+declaredType);
    }
    inputFeatures.put(reference, declaredType);
}
public RankingExpressionFunction findFunction(String name) {
RankingExpressionFunction function = functions.get(name);
return ((function == null) && (getInherited() != null))
? getInherited().findFunction(name)
: function;
}
    /** Returns an unmodifiable snapshot of the functions in this (including inherited ones), cached. */
    public Map<String, RankingExpressionFunction> getFunctions() {
        if (needToUpdateFunctionCache()) {
            allFunctionsCached = gatherAllFunctions();
        }
        return allFunctionsCached;
    }
    /** Builds the merged, unmodifiable view of inherited plus local functions; local ones win on name clashes. */
    private Map<String, RankingExpressionFunction> gatherAllFunctions() {
        if (functions.isEmpty() && getInherited() == null) return Collections.emptyMap();
        if (functions.isEmpty()) return getInherited().getFunctions();
        if (getInherited() == null) return Collections.unmodifiableMap(new LinkedHashMap<>(functions));
        // Both present: inherited first, then local overrides
        Map<String, RankingExpressionFunction> allFunctions = new LinkedHashMap<>(getInherited().getFunctions());
        allFunctions.putAll(functions);
        return Collections.unmodifiableMap(allFunctions);
    }
private boolean needToUpdateFunctionCache() {
if (getInherited() != null)
return (allFunctionsCached == null) || getInherited().needToUpdateFunctionCache();
return allFunctionsCached == null;
}
public int getKeepRankCount() {
if (keepRankCount >= 0) return keepRankCount;
if (getInherited() != null) return getInherited().getKeepRankCount();
return -1;
}
    /** Sets the keep-rank count (rerank array size). A negative value means the inherited value is used. */
    public void setKeepRankCount(int rerankArraySize) {
        this.keepRankCount = rerankArraySize;
    }
public double getRankScoreDropLimit() {
if (rankScoreDropLimit >- Double.MAX_VALUE) return rankScoreDropLimit;
if (getInherited() != null) return getInherited().getRankScoreDropLimit();
return rankScoreDropLimit;
}
    /** Sets the rank score drop limit: hits scoring at or below this are dropped — TODO confirm exact semantics with backend docs. */
    public void setRankScoreDropLimit(double rankScoreDropLimit) {
        this.rankScoreDropLimit = rankScoreDropLimit;
    }
    /** Returns the filter fields declared in this profile only (not including inherited ones). */
    public Set<String> filterFields() {
        return filterFields;
    }
/**
* Returns all filter fields in this profile and any profile it inherits.
*
* @return the set of all filter fields
*/
public Set<String> allFilterFields() {
RankProfile parent = getInherited();
Set<String> retval = new LinkedHashSet<>();
if (parent != null) {
retval.addAll(parent.allFilterFields());
}
retval.addAll(filterFields());
return retval;
}
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
if (expression.trim().length() == 0)
throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");
try (Reader rankingExpressionReader = openRankingExpressionReader(name, expression.trim())) {
return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
}
catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
ParseException exception = new ParseException("Could not parse ranking expression '" + expression.trim() +
"' in " + getName()+ ", " + name + ".");
throw (ParseException)exception.initCause(e);
}
catch (IOException e) {
throw new RuntimeException("IOException parsing ranking expression '" + name + "'");
}
}
private static String extractFileName(String expression) {
String fileName = expression.substring("file:".length()).trim();
if ( ! fileName.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX))
fileName = fileName + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
return fileName;
}
    /**
     * Opens a reader over the expression text: inline expressions are read directly,
     * 'file:' expressions are resolved through the search definition.
     *
     * @throws IllegalArgumentException if a referenced file lives in a subdirectory
     */
    private Reader openRankingExpressionReader(String expName, String expression) {
        if (!expression.startsWith("file:")) return new StringReader(expression);

        String fileName = extractFileName(expression);
        File file = new File(fileName);
        // Only plain (non-path) file names are supported for relative references
        if (!file.isAbsolute() && file.getPath().contains("/"))
            throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
                                               "' in subdirectory, which is not supported.");

        return search.getRankingExpression(fileName);
    }
    /** Shallow clones this: collections are copied so the clone mutates independently, but elements are shared. */
    @Override
    public RankProfile clone() {
        try {
            RankProfile clone = (RankProfile)super.clone();
            clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
            clone.matchPhaseSettings = this.matchPhaseSettings; // NOTE: shared with the original, not copied
            clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
            clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
            clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
            clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
            clone.functions = new LinkedHashMap<>(this.functions);
            clone.allFunctionsCached = null; // clone rebuilds its own function cache lazily
            clone.filterFields = new HashSet<>(this.filterFields);
            clone.constants = new HashMap<>(this.constants);
            return clone;
        }
        catch (CloneNotSupportedException e) {
            throw new RuntimeException("Won't happen", e);
        }
    }
    /**
     * Returns a copy of this where the content is optimized for execution.
     * Compiled profiles should never be modified.
     *
     * @throws IllegalArgumentException naming this profile if compilation fails
     */
    public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
        try {
            RankProfile compiled = this.clone(); // compile on a copy; this profile stays uncompiled
            compiled.compileThis(queryProfiles, importedModels);
            return compiled;
        }
        catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
        }
    }
    /** Compiles this in place: transforms phase expressions and functions, inlining inline functions first. */
    private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
        checkNameCollisions(getFunctions(), getConstants());
        ExpressionTransforms expressionTransforms = new ExpressionTransforms();

        Map<Reference, TensorType> featureTypes = collectFeatureTypes();
        // Function compiling first pass: compile inline functions without resolving other functions
        Map<String, RankingExpressionFunction> inlineFunctions =
                compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);

        firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
        secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);

        // Function compiling second pass: compile all functions and insert previously compiled inline functions
        functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
        allFunctionsCached = null; // functions changed; invalidate cache
    }
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
for (Map.Entry<String, RankingExpressionFunction> functionEntry : functions.entrySet()) {
if (constants.containsKey(functionEntry.getKey()))
throw new IllegalArgumentException("Cannot have both a constant and function named '" +
functionEntry.getKey() + "'");
}
}
private Map<String, RankingExpressionFunction> getInlineFunctions() {
return getFunctions().entrySet().stream().filter(x -> x.getValue().inline())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
    /**
     * Compiles all functions produced by the supplier, one at a time.
     * The supplier is re-queried each iteration because compiling a function may add rank properties
     * (and thereby change the function set); already-compiled names are skipped.
     */
    private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
                                                                    QueryProfileRegistry queryProfiles,
                                                                    Map<Reference, TensorType> featureTypes,
                                                                    ImportedMlModels importedModels,
                                                                    Map<String, RankingExpressionFunction> inlineFunctions,
                                                                    ExpressionTransforms expressionTransforms) {
        Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
        Map.Entry<String, RankingExpressionFunction> entry;
        // Loop until there are no more uncompiled functions left
        while (null != (entry = findUncompiledFunction(functions.get(), compiledFunctions.keySet()))) {
            RankingExpressionFunction rankingExpressionFunction = entry.getValue();
            RankingExpressionFunction compiled = compile(rankingExpressionFunction, queryProfiles, featureTypes,
                                                         importedModels, getConstants(), inlineFunctions, expressionTransforms);
            compiledFunctions.put(entry.getKey(), compiled);
        }
        return compiledFunctions;
    }
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
Set<String> compiledFunctionNames) {
for (Map.Entry<String, RankingExpressionFunction> entry : functions.entrySet()) {
if ( ! compiledFunctionNames.contains(entry.getKey()))
return entry;
}
return null;
}
    /**
     * Compiles a single function (or phase expression) by running all expression transforms over its body.
     * Rank properties produced by the transforms are added to this profile as a side effect.
     * Returns null when given null.
     */
    private RankingExpressionFunction compile(RankingExpressionFunction function,
                                              QueryProfileRegistry queryProfiles,
                                              Map<Reference, TensorType> featureTypes,
                                              ImportedMlModels importedModels,
                                              Map<String, Value> constants,
                                              Map<String, RankingExpressionFunction> inlineFunctions,
                                              ExpressionTransforms expressionTransforms) {
        if (function == null) return null;

        RankProfileTransformContext context = new RankProfileTransformContext(this,
                                                                              queryProfiles,
                                                                              featureTypes,
                                                                              importedModels,
                                                                              constants,
                                                                              inlineFunctions);
        RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
        // Transforms may emit rank properties (e.g. for constants); record them on this profile
        for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
            addRankProperty(rankProperty.getKey(), rankProperty.getValue());
        }
        return function.withExpression(expression);
    }
    /**
     * Creates a context containing the type information of all constants, attributes and query profiles
     * referable from this rank profile.
     */
    public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
        return typeContext(queryProfiles, collectFeatureTypes());
    }
public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }
private Map<Reference, TensorType> collectFeatureTypes() {
Map<Reference, TensorType> featureTypes = new HashMap<>();
inputFeatures.forEach((k, v) -> featureTypes.put(k, v));
allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
return featureTypes;
}
    /**
     * Creates the evaluation type context from the given feature types, then registers the types of
     * constants, query features declared in query profile types, and ONNX model outputs.
     *
     * @throws IllegalArgumentException if two query profile types declare the same query feature
     *                                  with incompatible tensor types
     */
    public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
        MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
                                                                                      .map(RankingExpressionFunction::function)
                                                                                      .collect(Collectors.toList()),
                                                                        featureTypes);

        // Add constants from both sources under the constant feature namespace
        getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
        rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));

        // Add query features from all query profile types; generalize dimensionwise when declared more than once
        for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
            for (FieldDescription field : queryProfileType.declaredFields().values()) {
                TensorType type = field.getType().asTensorType();
                Optional<Reference> feature = Reference.simple(field.getName());
                if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;

                TensorType existingType = context.getType(feature.get());
                if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
                    type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
                        new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
                                                     " with type " + field.getType().asTensorType() +
                                                     ", but this is already defined in another query profile with type " +
                                                     context.getType(feature.get())));
                context.setType(feature.get(), type);
            }
        }

        // Add the types of the default output and all mapped outputs of each ONNX model
        for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
            String modelName = entry.getKey();
            OnnxModel model = entry.getValue();
            Arguments args = new Arguments(new ReferenceNode(modelName));
            Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);

            TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
            context.setType(new Reference("onnxModel", args, null), defaultOutputType);

            for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
                TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
                context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
            }
        }
        return context;
    }
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
Map<String, TensorType> inputTypes = new HashMap<>();
for (String onnxInputName : model.getInputMap().keySet()) {
resolveOnnxInputType(onnxInputName, model, context).ifPresent(type -> inputTypes.put(onnxInputName, type));
}
return inputTypes;
}
    /**
     * Resolves the type of a single ONNX model input: a simple reference is looked up directly
     * (unwrapping rankingExpression(...) to its function name first), otherwise the source is
     * resolved as a function body. Returns empty when no source or no function is found.
     */
    private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
        String source = model.getInputMap().get(onnxInputName);
        if (source != null) {
            // source is either a simple reference (query/attribute/constant/rankingExpression)...
            Optional<Reference> reference = Reference.simple(source);
            if (reference.isPresent()) {
                if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
                    source = reference.get().simpleArgument().get();  // look up the function below instead
                } else {
                    return Optional.of(context.getType(reference.get()));
                }
            }
            // ... or a function
            ExpressionFunction func = context.getFunction(source);
            if (func != null) {
                return Optional.of(func.getBody().type(context));
            }
        }
        return Optional.empty();  // if this context does not contain this input
    }
    /**
     * Adds attribute feature types for all attributes of the given field.
     * The field's own attribute is registered under the field name; other attributes under their own name.
     */
    private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
        Attribute attribute = field.getAttribute();
        field.getAttributes().forEach((k, a) -> {
            String name = k;
            if (attribute == a)                // this attribute is the fields main attribute
                name = field.getName();
            featureTypes.put(FeatureNames.asAttributeFeature(name),
                             a.tensorType().orElse(TensorType.empty));
        });
    }
    /**
     * A rank setting. The identity of a rank setting is its field name and type (not value).
     * A rank setting is immutable.
     */
    public static class RankSetting implements Serializable {

        private final String fieldName;

        private final Type type;

        /** The rank value */
        private final Object value;

        /** The kinds of rank settings a field or index may carry. */
        public enum Type {

            RANKTYPE("rank-type"),
            LITERALBOOST("literal-boost"),
            WEIGHT("weight"),
            PREFERBITVECTOR("preferbitvector",true);

            private final String name;

            /** True if this setting really pertains to an index, not a field within an index */
            private final boolean isIndexLevel;

            Type(String name) {
                this(name,false);
            }

            Type(String name,boolean isIndexLevel) {
                this.name = name;
                this.isIndexLevel=isIndexLevel;
            }

            /** True if this setting really pertains to an index, not a field within an index */
            public boolean isIndexLevel() { return isIndexLevel; }

            /** Returns the name of this type */
            public String getName() {
                return name;
            }

            public String toString() {
                return "type: " + name;
            }

        }

        public RankSetting(String fieldName, RankSetting.Type type, Object value) {
            this.fieldName = fieldName;
            this.type = type;
            this.value = value;
        }

        public String getFieldName() { return fieldName; }

        public Type getType() { return type; }

        public Object getValue() { return value; }

        /** Returns the value as an int, or a negative value if it is not an integer */
        public int getIntValue() {
            if (value instanceof Integer) {
                return ((Integer)value);
            }
            else {
                return -1;
            }
        }

        @Override
        public int hashCode() {
            // Deliberately excludes value: identity is (fieldName, type), see class comment
            return fieldName.hashCode() + 17 * type.hashCode();
        }

        @Override
        public boolean equals(Object object) {
            if (!(object instanceof RankSetting)) {
                return false;
            }
            // Deliberately excludes value: identity is (fieldName, type), see class comment
            RankSetting other = (RankSetting)object;
            return
                fieldName.equals(other.fieldName) &&
                type.equals(other.type);
        }

        @Override
        public String toString() {
            return type + " setting " + fieldName + ": " + value;
        }

    }
/** A rank property. Rank properties are Value Objects */
public static class RankProperty implements Serializable {
private final String name;
private final String value;
public RankProperty(String name, String value) {
this.name = name;
this.value = value;
}
public String getName() { return name; }
public String getValue() { return value; }
@Override
public int hashCode() {
return name.hashCode() + 17 * value.hashCode();
}
@Override
public boolean equals(Object object) {
if (! (object instanceof RankProperty)) return false;
RankProperty other=(RankProperty)object;
return (other.name.equals(this.name) && other.value.equals(this.value));
}
@Override
public String toString() {
return name + " = " + value;
}
}
    /** A function in a rank profile */
    public static class RankingExpressionFunction {

        private ExpressionFunction function;

        /** True if this should be inlined into calling expressions. Useful for very cheap functions. */
        private final boolean inline;

        RankingExpressionFunction(ExpressionFunction function, boolean inline) {
            this.function = function;
            this.inline = inline;
        }

        /** Replaces the wrapped function with one carrying the given return type. */
        public void setReturnType(TensorType type) {
            this.function = function.withReturnType(type);
        }

        public ExpressionFunction function() { return function; }

        /** Returns whether to inline: only argument-less functions are inlined. */
        public boolean inline() {
            return inline && function.arguments().isEmpty();
        }

        /** Returns a copy of this with the function body replaced by the given expression. */
        RankingExpressionFunction withExpression(RankingExpression expression) {
            return new RankingExpressionFunction(function.withBody(expression), inline);
        }

        @Override
        public String toString() {
            return "function " + function;
        }

    }
    /** Settings for result diversity within a match phase. */
    public static final class DiversitySettings {

        private String attribute = null;        // the attribute to diversify over (required)
        private int minGroups = 0;              // minimum number of distinct groups (required, > 0)
        private double cutoffFactor = 10;       // must be >= 1.0
        private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;

        public void setAttribute(String value) { attribute = value; }
        public void setMinGroups(int value) { minGroups = value; }
        public void setCutoffFactor(double value) { cutoffFactor = value; }
        public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { cutoffStrategy = strategy; }
        public String getAttribute() { return attribute; }
        public int getMinGroups() { return minGroups; }
        public double getCutoffFactor() { return cutoffFactor; }
        public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }

        /** Throws IllegalArgumentException unless all required settings are present and in range. */
        void checkValid() {
            if (attribute == null || attribute.isEmpty()) {
                throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
            }
            if (minGroups <= 0) {
                throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
            }
            if (cutoffFactor < 1.0) {
                throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
            }
        }
    }
    /** Settings for the match phase of a query evaluation. */
    public static class MatchPhaseSettings {

        private String attribute = null;        // the attribute to limit matching by (required)
        private boolean ascending = false;
        private int maxHits = 0;                // must be set to a positive value
        private double maxFilterCoverage = 0.2; // default from the search core
        private DiversitySettings diversity = null;
        private double evaluationPoint = 0.20;
        private double prePostFilterTippingPoint = 1.0;

        /** Sets the diversity settings; validates them first. */
        public void setDiversity(DiversitySettings value) {
            value.checkValid();
            diversity = value;
        }

        public void setAscending(boolean value) { ascending = value; }
        public void setAttribute(String value) { attribute = value; }
        public void setMaxHits(int value) { maxHits = value; }
        public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
        public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
        public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }

        public boolean getAscending() { return ascending; }
        public String getAttribute() { return attribute; }
        public int getMaxHits() { return maxHits; }
        public double getMaxFilterCoverage() { return maxFilterCoverage; }
        public DiversitySettings getDiversity() { return diversity; }
        public double getEvaluationPoint() { return evaluationPoint; }
        public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }

        /** Throws IllegalArgumentException unless the required settings are present. */
        public void checkValid() {
            if (attribute == null) {
                throw new IllegalArgumentException("match-phase did not set any attribute");
            }
            if (! (maxHits > 0)) {
                throw new IllegalArgumentException("match-phase did not set max-hits > 0");
            }
        }

    }
    /** A mutable mapping from names to their declared (string-encoded) types. */
    public static class TypeSettings {

        private final Map<String, String> types = new HashMap<>();

        void addType(String name, String type) {
            types.put(name, type);
        }

        /** Returns an unmodifiable view (not a copy) of the type mapping. */
        public Map<String, String> getTypes() {
            return Collections.unmodifiableMap(types);
        }

    }
} |
Hmm — perhaps we should remove this check for those who deploy to dev every day, so that their deployments can also be upgraded while they sleep? | protected double maintain() {
AtomicInteger attempts = new AtomicInteger();
AtomicInteger failures = new AtomicInteger();
Version systemVersion = controller().readSystemVersion();
for (Application application : controller().applications().readable())
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values())
try {
attempts.incrementAndGet();
JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get());
if ( ! deployment.zone().environment().isManuallyDeployed()) continue;
Run last = controller().jobController().last(job).get();
Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.empty(), Optional.empty());
if ( ! deployment.version().isBefore(target.targetPlatform())) continue;
if ( controller().clock().instant().isBefore(last.start().plus(Duration.ofDays(1)))) continue;
if ( ! isLikelyNightFor(job)) continue;
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
controller().jobController().start(instance.id(), JobType.from(controller().system(), deployment.zone()).get(), target, true);
} catch (Exception e) {
failures.incrementAndGet();
log.log(Level.WARNING, "Failed upgrading " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
return asSuccessFactor(attempts.get(), failures.get());
} | if ( controller().clock().instant().isBefore(last.start().plus(Duration.ofDays(1)))) continue; | protected double maintain() {
AtomicInteger attempts = new AtomicInteger();
AtomicInteger failures = new AtomicInteger();
Version systemVersion = controller().readSystemVersion();
for (Application application : controller().applications().readable())
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values())
try {
attempts.incrementAndGet();
JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get());
if ( ! deployment.zone().environment().isManuallyDeployed()) continue;
Run last = controller().jobController().last(job).get();
Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.empty(), Optional.empty());
if ( ! deployment.version().isBefore(target.targetPlatform())) continue;
if ( controller().clock().instant().isBefore(last.start().plus(Duration.ofDays(1)))) continue;
if ( ! isLikelyNightFor(job)) continue;
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
controller().jobController().start(instance.id(), JobType.from(controller().system(), deployment.zone()).get(), target, true);
} catch (Exception e) {
failures.incrementAndGet();
log.log(Level.WARNING, "Failed upgrading " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
return asSuccessFactor(attempts.get(), failures.get());
} | class DeploymentUpgrader extends ControllerMaintainer {
public DeploymentUpgrader(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
private boolean isLikelyNightFor(JobId job) {
int hour = hourOf(controller().clock().instant());
int[] runStarts = controller().jobController().jobStarts(job).stream()
.mapToInt(DeploymentUpgrader::hourOf)
.toArray();
int localNight = mostLikelyWeeHour(runStarts);
return Math.abs(hour - localNight) <= 1;
}
static int mostLikelyWeeHour(int[] starts) {
double weight = 1;
double[] buckets = new double[24];
for (int start : starts)
buckets[start] += weight *= 0.8;
int best = -1;
double min = Double.MAX_VALUE;
for (int i = 12; i < 36; i++) {
double sum = 0;
for (int j = -12; j < 12; j++)
sum += buckets[(i + j) % 24] / (Math.abs(j) + 1);
if (sum < min) {
min = sum;
best = i;
}
}
return (best + 2) % 24;
}
private static int hourOf(Instant instant) {
return (int) (instant.toEpochMilli() / 3_600_000 % 24);
}
} | class DeploymentUpgrader extends ControllerMaintainer {
public DeploymentUpgrader(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
private boolean isLikelyNightFor(JobId job) {
int hour = hourOf(controller().clock().instant());
int[] runStarts = controller().jobController().jobStarts(job).stream()
.mapToInt(DeploymentUpgrader::hourOf)
.toArray();
int localNight = mostLikelyWeeHour(runStarts);
return Math.abs(hour - localNight) <= 1;
}
static int mostLikelyWeeHour(int[] starts) {
double weight = 1;
double[] buckets = new double[24];
for (int start : starts)
buckets[start] += weight *= 0.8;
int best = -1;
double min = Double.MAX_VALUE;
for (int i = 12; i < 36; i++) {
double sum = 0;
for (int j = -12; j < 12; j++)
sum += buckets[(i + j) % 24] / (Math.abs(j) + 1);
if (sum < min) {
min = sum;
best = i;
}
}
return (best + 2) % 24;
}
private static int hourOf(Instant instant) {
return (int) (instant.toEpochMilli() / 3_600_000 % 24);
}
} |
Why is this reported as "too large" when the file is new or deleted? | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n", | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | class ApplicationPackageDiff {
public static String diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static String diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return sb.length() == 0 ? "No diff\n" : sb.toString();
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} | class ApplicationPackageDiff {
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
// Splits UTF-8 encoded bytes into a list of lines (line terminators stripped).
private static List<String> lines(byte[] data) {
    // Presize with a rough heuristic of one line per 100 bytes, floored at 16.
    // The original used Math.min here, which caps the capacity at 16 and defeats the
    // presizing for large inputs; Math.max matches the evident intent. Allocation only —
    // the returned contents are unchanged.
    List<String> lines = new ArrayList<>(Math.max(16, data.length / 100));
    try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
        String line;
        while ((line = bufferedReader.readLine()) != null) {
            lines.add(line);
        }
    } catch (IOException e) {
        // Purely in-memory streams cannot fail; rethrow unchecked to keep the signature clean.
        throw new UncheckedIOException(e);
    }
    return lines;
}
// Heuristic binary-content detector: samples up to the first 10000 bytes and reports
// binary if fewer than 95% of them are printable ASCII or common whitespace.
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
// Java bytes are signed, so this matches control bytes 0x00-0x08 AND every byte
// >= 0x80 (negative as a byte). Consequently the first non-ASCII byte of, e.g.,
// UTF-8 multibyte text flags the file as binary — presumably intended; confirm.
if (b < 0x9) return true;
// TAB, LF and CR count as text ...
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
// ... as does the printable ASCII range. Other control bytes (0x0B-0x1F, 0x7F)
// are merely not counted; they don't short-circuit.
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} |
If left is non-binary, and present, but right is binary, this should still trigger, but it won't. | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false)) | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | class ApplicationPackageDiff {
public static String diffAgainstEmpty(ApplicationPackage right) {
    // A zero-entry ZIP is just the end-of-central-directory record; diffing against it
    // reports every file in 'right' as new.
    byte[] emptyArchive = {80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    return diff(new ApplicationPackage(emptyArchive), right);
}
public static String diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
// Builds a textual diff of the two packages' zip entries.
// Limits: entries above maxFileSizeToDiff are not diffed, one file's diff is capped at
// maxDiffSizePerFile, and output stops growing once it passes maxTotalDiffSize.
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
    if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";

    Map<String, ZipEntryWithContent> before = readContents(left, maxFileSizeToDiff);
    Map<String, ZipEntryWithContent> after = readContents(right, maxFileSizeToDiff);

    // Every entry name present on either side, each once, alphabetically.
    List<String> allNames = Stream.concat(before.keySet().stream(), after.keySet().stream())
            .distinct()
            .sorted()
            .collect(Collectors.toList());

    StringBuilder report = new StringBuilder();
    for (String name : allNames) {
        if (report.length() > maxTotalDiffSize) {
            // Budget exhausted: emit only a skip marker for the remaining files.
            report.append("--- ").append(name).append('\n')
                    .append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
        } else {
            diff(Optional.ofNullable(before.get(name)), Optional.ofNullable(after.get(name)), maxDiffSizePerFile)
                    .ifPresent(fileDiff -> report.append("--- ").append(name).append('\n').append(fileDiff).append('\n'));
        }
    }
    return report.length() == 0 ? "No diff\n" : report.toString();
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
// Splits UTF-8 encoded bytes into a list of lines (line terminators stripped).
private static List<String> lines(byte[] data) {
    // Presize at roughly one line per 100 bytes, floored at 16. The original used
    // Math.min, which caps the capacity at 16 and defeats the presize for large inputs;
    // Math.max matches the evident intent (allocation only, contents unchanged).
    List<String> lines = new ArrayList<>(Math.max(16, data.length / 100));
    // One resource declaration suffices: closing the BufferedReader closes the wrapped
    // InputStreamReader and stream.
    try (BufferedReader bufferedReader = new BufferedReader(
            new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
        String line;
        while ((line = bufferedReader.readLine()) != null) {
            lines.add(line);
        }
    } catch (IOException e) {
        // In-memory streams cannot actually fail; rethrow unchecked.
        throw new UncheckedIOException(e);
    }
    return lines;
}
// Heuristic binary-content detector: samples up to the first 10000 bytes and reports
// binary when fewer than 95% of them are printable ASCII or common whitespace.
private static boolean isBinary(byte[] data) {
    if (data.length == 0) return false;
    int sampleSize = Math.min(data.length, 10000);
    int textLike = 0;
    for (int index = 0; index < sampleSize; index++) {
        byte value = data[index];
        // Bytes below TAB — including every byte >= 0x80, which is negative as a signed
        // byte — immediately classify the data as binary.
        if (value < 0x9) return true;
        boolean commonWhitespace = value == 0x9 || value == 0xA || value == 0xD;
        boolean printableAscii = value >= 0x20 && value <= 0x7E;
        if (commonWhitespace || printableAscii) textLike++;
    }
    return textLike / (double) sampleSize < 0.95;
}
} | class ApplicationPackageDiff {
// Diffs 'right' against an empty application package, reporting every file as new.
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
// These bytes are the end-of-central-directory record, i.e. a valid zero-entry ZIP.
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} |
Wouldn't it be better to detect new/deleted files first, and then do all the more involved comparisons aftterwards? | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted"))); | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | class ApplicationPackageDiff {
public static String diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static String diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return sb.length() == 0 ? "No diff\n" : sb.toString();
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} | class ApplicationPackageDiff {
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} |
It's fine to collapse this to a single declaration, as close will propagate. | private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
} | BufferedReader bufferedReader = new BufferedReader(streamReader)) { | private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
} | class ApplicationPackageDiff {
public static String diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static String diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return sb.length() == 0 ? "No diff\n" : sb.toString();
}
// Diffs a single zip entry across the two packages. Empty result means "no difference";
// a present result is either a textual diff or a "Diff skipped: ..." marker.
private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
// Identical contents on both sides: nothing to report.
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
// An entry whose content is absent was too large to read (over maxFileSizeToDiff).
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
// Skip when EITHER side looks binary. The original expression
//   leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c)))
// returned the left Optional whenever it was present, so a present, non-binary left
// with a binary right was never detected and fell through to the line-based diff.
// A plain || of both sides checks each independently.
if (leftContent.map(c -> isBinary(c)).orElse(false) || rightContent.map(c -> isBinary(c)).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
// Line-based diff; an absent side is diffed against the empty file.
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} | class ApplicationPackageDiff {
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
// Diffs a single zip entry across the two packages. Empty result means "no difference";
// a present result is either a textual diff or a "Diff skipped: ..." marker.
private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
// Identical contents on both sides: nothing to report.
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
// An entry whose content is absent was too large to read (over maxFileSizeToDiff).
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
// anyMatch checks BOTH sides independently, so a binary file on either side skips the diff.
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
// Line-based diff; an absent side is diffed against the empty file.
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} |
Hmm, I guess you wan to print the diff if it's not large and one file is new. | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted"))); | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | class ApplicationPackageDiff {
public static String diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static String diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return sb.length() == 0 ? "No diff\n" : sb.toString();
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} | class ApplicationPackageDiff {
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} |
Ok so this is fine. | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n", | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | class ApplicationPackageDiff {
public static String diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static String diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return sb.length() == 0 ? "No diff\n" : sb.toString();
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} | class ApplicationPackageDiff {
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} |
If file is added/deleted, then then diff is the entire file with lines prefixed with +/-, so the diff will be slightly bigger than the file. | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n", | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | class ApplicationPackageDiff {
public static String diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static String diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return sb.length() == 0 ? "No diff\n" : sb.toString();
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} | class ApplicationPackageDiff {
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} |
Please also do this for the dev path :) | public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"]}"),
"{\"message\":\"Request created\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", GET)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"createdMillis\":" + tester.controller().clock().millis() + ",\"configId\":\"default/container.1\"" +
",\"artifacts\":[\"jvm-dump\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
"- <deployment version='1.0' >\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
} | tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR), | public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/job/dev-us-east-1/diff/1", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("--- search-definitions/test.sd\n" +
"@@ -1,0 +1,1 @@\n" +
"+ search test { }\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"]}"),
"{\"message\":\"Request created\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", GET)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"createdMillis\":" + tester.controller().clock().millis() + ",\"configId\":\"default/container.1\"" +
",\"artifacts\":[\"jvm-dump\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
"- <deployment version='1.0' >\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
}

class ApplicationApiTest extends ControllerContainerTest {
// Directory containing the canned JSON files that assertResponse(..., File) compares responses against.
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// EC public key used by the deploy-key endpoint tests, and its JSON-escaped form as it appears inside response bodies.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Expected response body for requests that are rejected with HTTP 403.
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
// Application package for the "default" instance: global endpoint over three prod regions with a change-block window.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Same deployment spec, but for an instance named "instance1".
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Athenz domains/identities used to exercise the different authorization roles in the API.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
// Canned Okta tokens required by mutating tenant/application operations.
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test fixtures, created fresh in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
    // Fresh harness per test: the container tester backs the deployment tester,
    // and the version status must be computed before any deployment can run.
    ContainerTester containerTester = new ContainerTester(container, responseFiles);
    tester = containerTester;
    deploymentTester = new DeploymentTester(new ControllerTester(containerTester));
    deploymentTester.controllerTester().computeVersionStatus();
}
/**
 * Helper (intentionally NOT annotated with {@code @Test}): attaches a deployment issue,
 * an ownership issue and an owner to the given application, so that issue fields appear
 * in subsequent API responses.
 *
 * The stray {@code @Test} annotation was removed: JUnit 4 requires test methods to be
 * public, return void and take no parameters, so annotating this private two-argument
 * helper would make the whole class fail with an initialization error.
 *
 * @param tester the deployment tester whose application store is updated
 * @param id     the application to decorate with issues and an owner
 */
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, application ->
            tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
                                                                .withOwnershipIssueId(IssueId.from("321"))
                                                                .withOwner(User.from("owner-username"))));
}
@Test
public void testRotationOverride() {
    // Set up a tenant and deploy an instance with a global rotation spanning two prod zones.
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    var west = ZoneId.from("prod", "us-west-1");
    var east = ZoneId.from("prod", "us-east-3");
    var pkg = new ApplicationPackageBuilder()
            .instances("instance1")
            .globalServiceId("foo")
            .region(west.region())
            .region(east.region())
            .build();
    var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
    app.submit(pkg).deploy();

    String centralRotation = "/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation";
    String westRotation = "/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation";

    // Unknown application is rejected with 400.
    tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
                          400);
    // A zone without a deployment yields 404 for both status and override.
    tester.assertResponse(request(centralRotation, GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
                          404);
    tester.assertResponse(request(centralRotation + "/override", PUT)
                                  .userIdentity(USER_ID)
                                  .data("{\"reason\":\"unit-test\"}"),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
                          404);

    setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));

    // Once the zone is in rotation, status and override can be read.
    tester.assertResponse(request(westRotation, GET)
                                  .userIdentity(USER_ID),
                          new File("global-rotation.json"));
    tester.assertResponse(request(westRotation + "/override", GET)
                                  .userIdentity(USER_ID),
                          new File("global-rotation-get.json"));

    // The tenant takes the deployment out of rotation ...
    tester.assertResponse(request(westRotation + "/override", PUT)
                                  .userIdentity(USER_ID)
                                  .data("{\"reason\":\"unit-test\"}"),
                          new File("global-rotation-put.json"));
    assertGlobalRouting(app.deploymentIdIn(west), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
    // ... and puts it back in again.
    tester.assertResponse(request(westRotation + "/override", DELETE)
                                  .userIdentity(USER_ID)
                                  .data("{\"reason\":\"unit-test\"}"),
                          new File("global-rotation-delete.json"));
    assertGlobalRouting(app.deploymentIdIn(west), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);

    // An operator may also set the override; the recorded agent is then 'operator'.
    addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
    tester.assertResponse(request(westRotation + "/override", PUT)
                                  .userIdentity(HOSTED_VESPA_OPERATOR)
                                  .data("{\"reason\":\"unit-test\"}"),
                          new File("global-rotation-put.json"));
    assertGlobalRouting(app.deploymentIdIn(west), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
@Test
public void multiple_endpoints() {
    // Deploy an instance exposing two global endpoints ("default" and "eu") across three prod regions.
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    var pkg = new ApplicationPackageBuilder()
            .instances("instance1")
            .region("us-west-1")
            .region("us-east-3")
            .region("eu-west-1")
            .endpoint("eu", "default", "eu-west-1")
            .endpoint("default", "default", "us-west-1", "us-east-3")
            .build();
    var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
    app.submit(pkg).deploy();
    setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
    setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
    setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));

    String prefix = "/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/";
    String unknownStatus = "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}";

    // With multiple rotations, the endpoint to query must be named explicitly.
    tester.assertResponse(request(prefix + "us-west-1/global-rotation", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
                          400);
    // Each endpoint can be queried in each of its zones once 'endpointId' is given.
    tester.assertResponse(request(prefix + "us-west-1/global-rotation", GET)
                                  .properties(Map.of("endpointId", "default"))
                                  .userIdentity(USER_ID),
                          unknownStatus,
                          200);
    tester.assertResponse(request(prefix + "us-west-1/global-rotation", GET)
                                  .properties(Map.of("endpointId", "eu"))
                                  .userIdentity(USER_ID),
                          unknownStatus,
                          200);
    tester.assertResponse(request(prefix + "eu-west-1/global-rotation", GET)
                                  .properties(Map.of("endpointId", "eu"))
                                  .userIdentity(USER_ID),
                          unknownStatus,
                          200);
}
@Test
// Verifies direct deployment of a system application by a hosted operator: deploying during a
// system upgrade is rejected with 400, and succeeds once the system is on the controller version.
public void testDeployDirectly() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
// Deploy request with no application package entity attached.
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
// Rejected while the system is mid-upgrade.
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
// Bring the system up to the controller version; the same deploy request then succeeds.
deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
@Test
// Verifies the metering endpoint renders data supplied by the mock metering client
// (current snapshot, this/last month aggregates, and a per-application snapshot history).
public void testMeteringResponses() {
MockMeteringClient mockMeteringClient = tester.serviceRegistry().meteringService();
ResourceAllocation currentSnapshot = new ResourceAllocation(1, 2, 3);
ResourceAllocation thisMonth = new ResourceAllocation(12, 24, 1000);
ResourceAllocation lastMonth = new ResourceAllocation(24, 48, 2000);
ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
// Three snapshots at increasing timestamps for the same application.
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, List.of(
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
mockMeteringClient.setMeteringData(new MeteringData(thisMonth, lastMonth, currentSnapshot, snapshotHistory));
tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance1-metering.json"));
}
@Test
// Verifies DELETE on .../deployment removes all production deployments of an application
// while leaving the dev deployment untouched.
public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
// Add a dev deployment in addition to the three prod ones.
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
// Only the dev deployment remains after removal.
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
}
@Test
// Exercises the API's error paths: access control (403), missing resources (404), and invalid
// requests (400) across tenant, application, instance, package and deploy endpoints.
// The sequence is stateful — later assertions depend on tenants/instances created earlier.
public void testErrorResponses() throws Exception {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// PUT on a tenant that does not yet exist is denied.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
// GETs on nonexistent tenant/application resources return 404.
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
// Create tenant1 successfully, then exercise tenant-creation conflicts and name validation.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
// Instance creation: first succeeds, second is a duplicate.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
// Package download errors: nothing submitted, unknown build number, unparsable build number.
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
// Direct deploy to dev through this API is not supported for regular instances.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// Tenant deletion: blocked while it has active applications; then delete instance and tenant.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
// Deleting without Okta tokens is denied.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
// A tenant stored with an underscore name conflicts with the dash-normalized form.
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
@Test
// Verifies authorization rules: unauthenticated requests get 401, users outside the tenant's
// Athenz domain get 403, and domain admins can create/update/delete tenant resources.
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity at all -> 401 Not authenticated.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
// Listing tenants is allowed for any authenticated user; none exist yet.
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Tenant creation requires being admin of the Athenz domain.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
// Instance creation follows the same admin/non-admin split.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
// Direct prod deploy is denied even for the tenant admin user identity.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
// Updating the tenant's Athenz domain: denied for outsiders, allowed for the admin.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
@Test
// Verifies submission rules for the Athenz identity declared in deployment.xml:
// the identity's domain must equal the tenant's domain, and the submitting identity must be
// allowed to launch that Athenz service.
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
// Identity declared under a foreign domain ("another.domain") — must be rejected.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
// Correct domain, but launch of the service has not been allowed yet.
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
// After allowing launch in the tenant domain, submission succeeds.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
@Test
// Verifies that a dev deployment declaring an Athenz identity is only allowed once the deploying
// user has been made admin of the tenant's Athenz domain.
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// Not yet a domain admin -> denied.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
// Grant domain admin to the user, then the same deploy succeeds.
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
@Test
// Verifies dev deploys by non-admin developers: launching a service requires an explicit launch
// policy or tenant-admin membership, and the deploy payload may be multipart or application/zip
// (but not application/gzip).
public void developers_can_deploy_when_privileged() {
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
// A developer with their own sandbox tenant, but no privileges in domain1.
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// No launch permission on domain1.service -> rejected with guidance.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
// Add an explicit launch policy for the developer -> deploy succeeds.
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
// A second developer who is tenant admin in both domains can also deploy.
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
// Raw application/zip body is accepted; application/gzip is not.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
@Test
// Verifies instance/deployment responses for an application with routing policies in a zone
// supporting both exclusive and shared routing, including legacy-endpoint inclusion and the
// flag that hides shared routing endpoints.
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
// Legacy endpoints are included only when explicitly requested.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
// With the hide-shared-routing flag on, shared endpoints disappear from the response.
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} | class ApplicationApiTest extends ControllerContainerTest {
// Directory holding canned JSON files compared against responses in file-based assertions.
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// Sample EC public key in PEM form, plus a variant with newlines escaped for embedding in JSON bodies.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Expected body for 403 responses throughout these tests.
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
// Application package for the 'default' instance: three prod regions, a global service id,
// and a weekday morning block-change window.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Same package shape, but for the 'instance1' instance.
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Fixed identities and tokens reused across tests.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test fixtures, initialized in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
    // Build fresh testers for each test and establish the initial version status.
    tester = new ContainerTester(container, responseFiles);
    ControllerTester controllerTester = new ControllerTester(tester);
    deploymentTester = new DeploymentTester(controllerTester);
    deploymentTester.controllerTester().computeVersionStatus();
}
// NOTE(review): removed a stray @Test annotation that preceded this method. JUnit 4 requires
// @Test methods to be public, void and take no arguments; annotating this private two-argument
// helper makes the runner reject the whole class with an initialization error.
/**
 * Test helper: stores the given application with a deployment issue, an ownership issue,
 * and an owner, so tests can assert how these fields are rendered.
 */
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, application ->
            tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
                                                                .withOwnershipIssueId(IssueId.from("321"))
                                                                .withOwner(User.from("owner-username"))));
}
@Test
// Verifies the global-rotation status and override endpoints: 400/404 for unknown applications
// or zones without a deployment, and PUT/DELETE of the override by both the tenant and an
// operator, asserting the resulting global routing status and change agent.
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Unknown application and zones without a deployment are rejected.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
// Tenant sets the override: routing goes out, attributed to the tenant.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
// Tenant removes the override: routing goes back in.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// An operator can also set the override; the change is attributed to the operator.
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
// Verifies global-rotation status reporting for an application with two endpoints ("default" and
// "eu"): an unscoped status query must be rejected as ambiguous, while queries naming a known
// endpoint return that endpoint's BCP rotation status.
@Test
public void multiple_endpoints() {
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .instances("instance1")
            .region("us-west-1")
            .region("us-east-3")
            .region("eu-west-1")
            .endpoint("eu", "default", "eu-west-1")
            .endpoint("default", "default", "us-west-1", "us-east-3")
            .build();
    var deployment = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
    deployment.submit(pkg).deploy();

    // Put the zones backing each rotation in service.
    setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
    setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
    setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));

    // With multiple rotations the endpoint must be named explicitly.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
                          400);

    // Scoped queries succeed for each (region, endpointId) combination, in the same order as before.
    for (var regionAndEndpoint : List.of(Map.entry("us-west-1", "default"),
                                         Map.entry("us-west-1", "eu"),
                                         Map.entry("eu-west-1", "eu"))) {
        tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/" + regionAndEndpoint.getKey() + "/global-rotation", GET)
                                      .properties(Map.of("endpointId", regionAndEndpoint.getValue()))
                                      .userIdentity(USER_ID),
                              "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                              200);
    }
}
// Exercises direct deployment by a hosted operator: deploying a system application is rejected
// while the system itself is upgrading, and succeeds once the upgrade has completed.
@Test
public void testDeployDirectly() {
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));

    // Set up a tenant and an instance.
    tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("tenant-without-applications.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance-reference.json"));
    addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));

    String deployPath = "/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy";
    MultiPartStreamer deployData = createApplicationDeployData(Optional.empty(), true);

    // While the system is upgrading, system applications may not be deployed.
    tester.assertResponse(request(deployPath, POST)
                                  .data(deployData)
                                  .userIdentity(HOSTED_VESPA_OPERATOR),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
                          400);

    // Complete the system upgrade; the same deployment now succeeds.
    deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
    tester.assertResponse(request(deployPath, POST)
                                  .data(deployData)
                                  .userIdentity(HOSTED_VESPA_OPERATOR),
                          new File("deploy-result.json"));
}
// Checks the metering endpoint: data registered with the mock metering service is rendered as
// expected, even for an application with no actual deployments.
@Test
public void testMeteringResponses() {
    MockMeteringClient meteringMock = tester.serviceRegistry().meteringService();
    ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");

    // Three historic snapshots at doubling timestamps.
    Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, List.of(
            new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
            new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
            new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
    meteringMock.setMeteringData(new MeteringData(new ResourceAllocation(12, 24, 1000),  // this month
                                                  new ResourceAllocation(24, 48, 2000),  // last month
                                                  new ResourceAllocation(1, 2, 3),       // current snapshot
                                                  snapshotHistory));

    tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance1-metering.json"));
}
// Verifies that DELETE on .../deployment removes all production deployments but leaves a
// manually deployed dev instance untouched.
@Test
public void testRemovingAllDeployments() {
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .instances("instance1")
            .region("us-west-1")
            .region("us-east-3")
            .region("eu-west-1")
            .endpoint("eu", "default", "eu-west-1")
            .endpoint("default", "default", "us-west-1", "us-east-3")
            .build();
    deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
    var deployment = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
    deployment.submit(pkg).deploy();

    // Add a dev deployment on top of the three production ones.
    tester.controller().jobController().deploy(deployment.instanceId(), JobType.devUsEast1, Optional.empty(), pkg);
    Set<ZoneId> expectedZones = Set.of(ZoneId.from("prod.us-west-1"),
                                       ZoneId.from("prod.us-east-3"),
                                       ZoneId.from("prod.eu-west-1"),
                                       ZoneId.from("dev.us-east-1"));
    assertEquals(expectedZones, deployment.instance().deployments().keySet());

    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"message\":\"All deployments removed\"}");

    // Only the dev deployment survives.
    assertEquals(Set.of(ZoneId.from("dev.us-east-1")), deployment.instance().deployments().keySet());
}
@Test
// End-to-end error handling for the tenant/application CRUD API. Each request pins the exact
// status code and body for one misuse. The sequence is stateful: earlier requests create and
// delete the resources the later ones exercise, so call order must not be changed.
public void testErrorResponses() throws Exception {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Updating a tenant that does not yet exist is denied (403), not not-found.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
// Reads against unknown tenant/application/deployment all yield 404.
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
// Create tenant1, then exercise the creation error paths.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
// An Athenz domain may back at most one tenant.
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
// Duplicate tenant creation is rejected.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
// Tenant-name syntax rules (no underscores etc.) are enforced at creation.
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
// Reserved system tenant name cannot be claimed.
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
// Create an instance, then verify duplicate instance creation fails.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
// NOTE(review): the queued prepare failure below does not appear to be consumed by the
// following package GETs (which 404 before any deployment happens) — confirm whether this
// setup is still needed or is left over from an earlier version of the test.
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
// Package retrieval errors: nothing submitted, unknown build number, malformed build number.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
// Direct dev deployment through this endpoint is not supported for regular applications.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// A tenant with active applications cannot be deleted.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
// Delete the instance; a second delete of the now-missing instance yields 404.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
// Delete the tenant; deleting again without Okta tokens is denied.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
// A legacy tenant stored with an underscore collides with the dashed form of the same name.
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
@Test
// Access-control matrix for tenant and application mutations: unauthenticated requests get 401,
// authenticated-but-unauthorized users get 403, and domain admins succeed. Order matters — each
// successful call creates the resource the following checks operate on.
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity at all: 401.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
// Listing tenants requires only authentication.
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Tenant creation requires being admin of the backing Athenz domain.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
// Instance creation: denied for outsiders, allowed for the tenant admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
// Production deploys through this endpoint are not available to end users.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
// Application deletion: denied for outsiders, allowed for the tenant admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
// Tenant updates (e.g. switching Athenz domain) follow the same admin-only rule.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
// Tenant deletion is likewise denied for outsiders.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
@Test
// Submission-time validation of the Athenz identity declared in deployment.xml: the identity's
// domain must equal the tenant's own domain, and the controller must be allowed to launch the
// named service. Both failure modes are exercised before the successful submission.
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
// Package declaring an identity under a domain other than the tenant's.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
// Even with launch permission, the foreign domain must be rejected.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
// Correct domain, but launch of the service has not been permitted yet.
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
// Grant launch permission under the tenant's domain; submission now succeeds.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
// A personal (dev) deployment that requests an Athenz service identity requires the deploying
// user to be an admin of the tenant's Athenz domain.
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
    UserId tenantAdmin = new UserId("tenant-admin");
    UserId nonAdmin = new UserId("new-user");
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
    allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));

    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
            .build();
    createTenantAndApplication();
    MultiPartStreamer deployData = createApplicationDeployData(pkg, true);
    String deployPath = "/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1";

    // Not yet an admin of the tenant domain: deployment is denied.
    tester.assertResponse(request(deployPath, POST)
                                  .data(deployData)
                                  .userIdentity(nonAdmin),
                          accessDenied,
                          403);

    // Grant domain admin rights; the same deployment now starts.
    tester.athenzClientFactory().getSetup()
          .domains.get(ATHENZ_TENANT_DOMAIN)
          .admin(HostedAthenzIdentities.from(nonAdmin));
    tester.assertResponse(request(deployPath, POST)
                                  .data(deployData)
                                  .userIdentity(nonAdmin),
                          "{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
@Test
// Dev deployments with an Athenz identity from a *different* domain than the deploying tenant:
// a developer needs either an explicit launch policy in the identity's domain, or tenant-admin
// membership there. Also checks accepted payload content types for the deploy endpoint.
public void developers_can_deploy_when_privileged() {
// Set up a tenant domain with an admin and a launchable service.
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
// A separate sandbox tenant owned by the developer.
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
// Package requesting an identity under domain1, which the developer cannot launch yet.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
// An explicit launch policy for the developer in domain1 unblocks the deployment.
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
// A second developer who is tenant admin in both domains may deploy without an explicit policy.
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
// Raw application/zip payloads are accepted; application/gzip is rejected.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
@Test
// Rendering of routing policies in instance and deployment responses for a zone supporting both
// exclusive and shared routing, including the legacy-endpoint view and the behavior when shared
// endpoints are hidden behind a feature flag.
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
// Compile against the minimum version required for direct routing.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
// Legacy endpoints are included only on request.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
// With the hide-shared-routing flag set, shared endpoints disappear from the response.
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} |
Not sure if that would be any easier? I still want to do diff as long as the file is non-binary (in that case the diff is effectively the entire file prefixed with +/-) | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
// Diffs one zip entry across two packages (old = left, new = right). An absent Optional means
// the file does not exist on that side (added/deleted). Returns empty when there is nothing to
// report, otherwise a line diff or a "Diff skipped: ..." explanation.
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
// Byte-identical content on both sides: no change.
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
// An entry whose content was not retained (file exceeded the read-size cap) cannot be diffed;
// report the sizes instead.
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
// Binary content on either side: a line-based diff would be meaningless.
if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
// Line-based diff; an absent side diffs against the empty file, so the whole file shows as +/-.
// Oversized diffs are elided with a note carrying the actual diff length.
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted"))); | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
// Body of the stream-based variant of the per-entry diff (signature on the preceding line).
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
// Byte-identical content on both sides: no change.
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
// Content dropped during reading (entry exceeded the size cap) on either side: sizes only.
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
// Binary data on either side: a line-based diff would be meaningless.
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
// Line diff; an absent side is treated as an empty file. Oversized diffs are elided.
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | class ApplicationPackageDiff {
/** Diffs {@code right} against an empty application package, rendering it as all-new content. */
public static String diffAgainstEmpty(ApplicationPackage right) {
    // Smallest valid ZIP archive: just the End Of Central Directory record, with no entries.
    byte[] noEntriesZip = {80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    return diff(new ApplicationPackage(noEntriesZip), right);
}
/** Diffs two application packages using the default size limits. */
public static String diff(ApplicationPackage left, ApplicationPackage right) {
    int maxFileSizeToDiff = 10 << 20;  // 10 MiB: larger entries are not read for diffing
    int maxDiffSizePerFile = 1 << 20;  // 1 MiB: larger per-file diffs are elided
    int maxTotalDiffSize = 10 << 20;   // 10 MiB: overall output budget
    return diff(left, right, maxFileSizeToDiff, maxDiffSizePerFile, maxTotalDiffSize);
}
/**
 * Renders a per-file diff of two application packages.
 *
 * @param left the old package
 * @param right the new package
 * @param maxFileSizeToDiff entries larger than this are read without content and not diffed
 * @param maxDiffSizePerFile per-file diffs larger than this are replaced by a skip note
 * @param maxTotalDiffSize once the accumulated output exceeds this, remaining files get skip notes
 * @return the concatenated diff, or {@code "No diff\n"} when the packages are identical
 */
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
    if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
    Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
    Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);

    StringBuilder sb = new StringBuilder();
    // Union of file names from both packages, in stable (sorted) order.
    List<String> files = Stream.of(leftContents, rightContents)
            .flatMap(contents -> contents.keySet().stream())
            .sorted()
            .distinct()
            .collect(Collectors.toList());
    for (String file : files) {
        if (sb.length() > maxTotalDiffSize)
            // Fix: this message previously read "Total diff size >NB)" with an unbalanced closing
            // parenthesis; reworded to match the "(...)" form of the other skip messages.
            sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size too large (>").append(maxTotalDiffSize).append("B)\n\n");
        else
            diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
                    .ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
    }
    return sb.length() == 0 ? "No diff\n" : sb.toString();
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} | class ApplicationPackageDiff {
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} |
Added | public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"]}"),
"{\"message\":\"Request created\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", GET)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"createdMillis\":" + tester.controller().clock().millis() + ",\"configId\":\"default/container.1\"" +
",\"artifacts\":[\"jvm-dump\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
"- <deployment version='1.0' >\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
} | tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR), | public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/job/dev-us-east-1/diff/1", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("--- search-definitions/test.sd\n" +
"@@ -1,0 +1,1 @@\n" +
"+ search test { }\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"]}"),
"{\"message\":\"Request created\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", GET)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"createdMillis\":" + tester.controller().clock().millis() + ",\"configId\":\"default/container.1\"" +
",\"artifacts\":[\"jvm-dump\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
"- <deployment version='1.0' >\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
} | class ApplicationApiTest extends ControllerContainerTest {
// Directory holding the canned JSON files that API responses are compared against.
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// PEM-encoded public key fixture used verbatim by key-related requests.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
// Same key with newlines escaped, for embedding inside JSON request/response bodies.
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Expected body of a 403 (access denied) response.
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
// Package for the "default" instance: global rotation over three prod regions with a change-blocking window.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Same package shape, but declaring instance "instance1".
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Athenz domains and principals used throughout the tests.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
// Okta tokens attached to requests; the access token is a static sample JWT (fixture data only).
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test fixtures, assigned in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
/** Creates fresh container and deployment testers and computes the initial version status before each test. */
@Before
public void before() {
    var containerTester = new ContainerTester(container, responseFiles);
    tester = containerTester;
    deploymentTester = new DeploymentTester(new ControllerTester(containerTester));
    deploymentTester.controllerTester().computeVersionStatus();
}
/**
 * Test helper: stamps the given application with fixed deployment and ownership issue ids
 * and an owner, so responses that render issue data can be compared against canned files.
 *
 * Note: this must NOT carry {@code @Test} — JUnit 4 requires test methods to be public,
 * void and parameterless, so annotating this private, parameterized helper makes the
 * runner fail the whole class with an initialization error. The stray annotation is removed.
 */
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, application ->
            tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
                                                                .withOwnershipIssueId(IssueId.from("321"))
                                                                .withOwner(User.from("owner-username"))));
}
/**
 * Exercises the global-rotation status and override resources: error responses for unknown
 * applications and undeployed zones, tenant-initiated out-of-rotation overrides and their
 * removal, and operator-initiated overrides.
 */
@Test
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
// Deploy an instance whose global rotation spans both prod zones.
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Querying rotation status for an application that does not exist -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
// Status and override for a zone the instance has no deployment in -> 404.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
// Read rotation status and the current override state for a deployed zone.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
// Tenant sets the deployment out of rotation: routing status becomes out, agent tenant.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
// Deleting the override puts the deployment back in rotation.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// An override made by a hosted operator is recorded with the operator agent.
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
/**
 * With multiple global endpoints, a rotation-status query must identify the endpoint:
 * an unqualified query is rejected as ambiguous, while queries qualified with
 * 'endpointId' succeed for each endpoint.
 */
@Test
public void multiple_endpoints() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Two endpoints: "eu" covering eu-west-1, and "default" covering the US regions.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
// No endpointId given: ambiguous for an application with several rotations -> 400.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
// Qualified queries succeed for each endpoint/zone combination.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "default"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
}
/**
 * Direct deployment of a system application by a hosted operator: rejected while the
 * system itself is mid-upgrade, allowed once the system is on the controller's version.
 */
@Test
public void testDeployDirectly() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
// Set up a tenant and an instance.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
// Deploy data without an application package (system applications carry none).
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
// While a system upgrade is in progress, deploying a system application is refused.
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
// Upgrade the system to the controller's version; the same deployment then succeeds.
deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
/** The metering endpoint renders exactly the data fed to the mock metering service. */
@Test
public void testMeteringResponses() {
    var application = ApplicationId.from("doesnotexist", "doesnotexist", "default");
    // Fixture allocations: current usage plus aggregates for this and last month.
    var currentSnapshot = new ResourceAllocation(1, 2, 3);
    var thisMonth = new ResourceAllocation(12, 24, 1000);
    var lastMonth = new ResourceAllocation(24, 48, 2000);
    // Usage history: three identical snapshots at doubling timestamps.
    var snapshotHistory = Map.of(application, List.of(
            new ResourceSnapshot(application, 1, 2, 3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
            new ResourceSnapshot(application, 1, 2, 3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
            new ResourceSnapshot(application, 1, 2, 3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
    tester.serviceRegistry().meteringService()
          .setMeteringData(new MeteringData(thisMonth, lastMonth, currentSnapshot, snapshotHistory));
    tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance1-metering.json"));
}
/**
 * DELETE on the application's /deployment resource removes the production deployments
 * while the dev deployment is left in place.
 */
@Test
public void testRemovingAllDeployments() {
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    var pkg = new ApplicationPackageBuilder()
            .instances("instance1")
            .region("us-west-1")
            .region("us-east-3")
            .region("eu-west-1")
            .endpoint("eu", "default", "eu-west-1")
            .endpoint("default", "default", "us-west-1", "us-east-3")
            .build();
    deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
    var context = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
    context.submit(pkg).deploy();
    tester.controller().jobController().deploy(context.instanceId(), JobType.devUsEast1, Optional.empty(), pkg);
    // All three prod zones plus the dev zone are now deployed.
    var devZone = ZoneId.from("dev.us-east-1");
    assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), devZone),
                 context.instance().deployments().keySet());
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"message\":\"All deployments removed\"}");
    // Only the dev deployment survives.
    assertEquals(Set.of(devZone), context.instance().deployments().keySet());
}
/**
 * Exercises error responses across the tenant/application lifecycle: lookups of
 * non-existing resources, duplicate and invalid creations, package retrieval errors,
 * unsupported deploy paths, and deletion constraints.
 */
@Test
public void testErrorResponses() throws Exception {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Modifying a tenant that does not yet exist is denied.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
// Lookups of non-existing tenant/application resources -> 404.
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
// Create the tenant; thereafter the same domain, name, or reserved names are rejected.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
// Tenant names must follow the naming rules (no underscores, etc.).
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
// Creating an instance works once; a duplicate creation is rejected.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
// Arm the config server mock to fail the next prepare call.
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
// Package retrieval: nothing submitted yet, unknown build number, non-numeric build number.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
// Direct deployment through this API is not supported for this instance.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// Tenant deletion is blocked while it still has applications; deleting the instance first succeeds.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
// Deleting without Okta tokens is denied.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
// A tenant written directly with an underscore name collides with its dashed form.
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
/**
 * Verifies authentication and authorization rules: unauthenticated requests get 401,
 * users who are not admins of the relevant Athenz domain get 403, while domain admins
 * can create, modify and delete tenants and applications.
 */
@Test
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity at all -> 401.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
// Listing tenants is allowed for any authenticated user.
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Only an admin of the Athenz domain may create a tenant bound to it.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
// Instance creation follows the same admin rule.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
// Direct prod deployment as a plain user is denied.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
// Deleting the application requires admin rights.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
// Changing the tenant's Athenz domain requires admin rights in the new domain.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
// Tenant deletion by a non-admin is denied.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
/**
 * Submission with an Athenz identity requires that the declared domain matches the
 * tenant's domain and that the controller is allowed to launch the declared service.
 */
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
// Package declaring an identity under a domain other than the tenant's.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
// Even though launch is allowed for the foreign service, the domain mismatch is rejected.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
// Correct domain, but launch of the service has not been allowed yet.
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
// With matching domain and launch allowed, submission succeeds.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
/**
 * A personal (dev) deployment declaring an Athenz service is rejected unless the deploying
 * user is an admin of the tenant's Athenz domain; once made admin, the same deploy succeeds.
 */
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
    var admin = new UserId("tenant-admin");
    var nonAdmin = new UserId("new-user");
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, admin);
    allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
            .build();
    createTenantAndApplication();
    MultiPartStreamer deployData = createApplicationDeployData(pkg, true);
    String deployPath = "/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1";
    // Not a domain admin yet -> denied.
    tester.assertResponse(request(deployPath, POST)
                                  .data(deployData)
                                  .userIdentity(nonAdmin),
                          accessDenied,
                          403);
    // Grant domain admin rights; the same deployment now starts.
    tester.athenzClientFactory().getSetup()
          .domains.get(ATHENZ_TENANT_DOMAIN)
          .admin(HostedAthenzIdentities.from(nonAdmin));
    tester.assertResponse(request(deployPath, POST)
                                  .data(deployData)
                                  .userIdentity(nonAdmin),
                          "{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
@Test
// Verifies developer deployment privileges in a sandbox tenant: a developer may only
// launch an Athenz service from another domain if that domain has a launch policy for
// them, or if they are a tenant admin of that domain. Also checks content-type handling
// on the deploy endpoint (multipart and application/zip accepted, others rejected).
public void developers_can_deploy_when_privileged() {
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
// Set up a separate "sandbox" tenant administered by the developer.
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
                                                   sandboxDomain,
                                                   new Property("vespa"),
                                                   Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
        new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
// The package declares an identity in domain1, which the developer cannot launch yet.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
        .build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                              .data(entity)
                              .userIdentity(developer),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
                      400);
// Add an explicit launch policy for the developer; deploy now succeeds.
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                              .data(entity)
                              .userIdentity(developer),
                      "{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
                      200);
// A second developer who is tenant admin of both domains may deploy without an explicit policy.
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                              .data(entity)
                              .userIdentity(developer2),
                      "{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
                      200);
// A raw application/zip body is also accepted...
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                              .data(applicationPackageInstance1.zippedContent())
                              .contentType("application/zip")
                              .userIdentity(developer2),
                      "{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
// ...but other content types are rejected.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                              .data(applicationPackageInstance1.zippedContent())
                              .contentType("application/gzip")
                              .userIdentity(developer2),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
@Test
// Verifies that instance and deployment responses include routing-policy information
// for a zone supporting both exclusive and shared routing, including the legacy-endpoint
// variant and the response when shared routing endpoints are hidden by feature flag.
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
                                                                    List.of(RoutingMethod.exclusive, RoutingMethod.shared));
// Compile against the minimum version required for direct routing.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
        .compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
        .instances("instance1")
        .region(zone.region().value())
        .build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
                              .userIdentity(USER_ID),
                      new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                              .userIdentity(USER_ID),
                      new File("deployment-with-routing-policy.json"));
// Legacy endpoints are included only when explicitly requested.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                              .userIdentity(USER_ID)
                              .properties(Map.of("includeLegacyEndpoints", "true")),
                      new File("deployment-with-routing-policy-legacy.json"));
// With the hide-shared-routing flag set, shared endpoints are omitted from the response.
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                              .userIdentity(USER_ID),
                      new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} | class ApplicationApiTest extends ControllerContainerTest {
// Directory containing the expected-response JSON files referenced via new File(...).
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// Test EC public key used where the API expects a PEM-encoded key.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
                                           "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
                                           "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
                                           "-----END PUBLIC KEY-----\n";
// Same key with newlines escaped for embedding in JSON string literals.
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Standard 403 response body produced by the API filter chain.
private static final String accessDenied = "{\n  \"code\" : 403,\n  \"message\" : \"Access denied\"\n}";
// Reusable application package with a "default" instance and three prod regions.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
        .instances("default")
        .globalServiceId("foo")
        .region("us-central-1")
        .region("us-east-3")
        .region("us-west-1")
        .blockChange(false, true, "mon-fri", "0-8", "UTC")
        .build();
// Same package shape, but with instance name "instance1".
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
        .instances("instance1")
        .globalServiceId("foo")
        .region("us-central-1")
        .region("us-east-3")
        .region("us-west-1")
        .blockChange(false, true, "mon-fri", "0-8", "UTC")
        .build();
// Fixed identities/domains/tokens shared by the tests below.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test fixtures, initialized in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
// Wires the container-backed API tester and the deployment tester, and initializes
// the system version status so version-dependent endpoints behave deterministically.
public void before() {
tester = new ContainerTester(container, responseFiles);
deploymentTester = new DeploymentTester(new ControllerTester(tester));
deploymentTester.controllerTester().computeVersionStatus();
}
/**
 * Test helper: attaches a deployment issue id, an ownership issue id and an owner to
 * the given application, so responses rendering issue/owner fields can be exercised.
 *
 * Note: this is a parameterized private helper, not a test case. The previous
 * {@code @Test} annotation on it was invalid — JUnit 4 requires test methods to be
 * public, void and parameterless, and the stray annotation would make the whole
 * class fail initialization ("Method addIssues should have no parameters").
 */
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, application ->
            tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
                                                                .withOwnershipIssueId(IssueId.from("321"))
                                                                .withOwner(User.from("owner-username"))));
}
@Test
// Exercises the global-rotation status and override endpoints: unknown application and
// undeployed zone error cases, reading rotation status, setting/removing a tenant
// override, and setting an operator override.
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
        .instances("instance1")
        .globalServiceId("foo")
        .region(westZone.region())
        .region(eastZone.region())
        .build();
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Unknown application → 400.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
                      400);
// Known application, but no deployment in the requested zone → 404, for both GET and PUT.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
                      404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
                              .userIdentity(USER_ID)
                              .data("{\"reason\":\"unit-test\"}"),
                      "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
                      404);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
// Read rotation status and current override state.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                              .userIdentity(USER_ID),
                      new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
                              .userIdentity(USER_ID),
                      new File("global-rotation-get.json"));
// Tenant sets an override (takes zone out of rotation), then deletes it (back in).
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
                              .userIdentity(USER_ID)
                              .data("{\"reason\":\"unit-test\"}"),
                      new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
                              .userIdentity(USER_ID)
                              .data("{\"reason\":\"unit-test\"}"),
                      new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// An operator override is attributed to the operator agent.
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
                              .userIdentity(HOSTED_VESPA_OPERATOR)
                              .data("{\"reason\":\"unit-test\"}"),
                      new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
@Test
// Verifies global-rotation status for an application with multiple declared endpoints:
// the 'endpointId' query parameter is required when more than one rotation exists, and
// per-endpoint status is returned when it is given.
public void multiple_endpoints() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .instances("instance1")
        .region("us-west-1")
        .region("us-east-3")
        .region("eu-west-1")
        .endpoint("eu", "default", "eu-west-1")
        .endpoint("default", "default", "us-west-1", "us-east-3")
        .build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
// Without endpointId the request is ambiguous → 400.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
                      400);
// With endpointId, per-endpoint BCP status is returned.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                              .properties(Map.of("endpointId", "default"))
                              .userIdentity(USER_ID),
                      "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                      200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                              .properties(Map.of("endpointId", "eu"))
                              .userIdentity(USER_ID),
                      "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                      200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
                              .properties(Map.of("endpointId", "eu"))
                              .userIdentity(USER_ID),
                      "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                      200);
}
@Test
// Verifies direct deployment by a hosted operator: deploying a system application is
// rejected while a system upgrade is in progress, and allowed once the system has
// converged on the controller version.
public void testDeployDirectly() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                              .userIdentity(USER_ID)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("instance-reference.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
// System application deploy during an ongoing system upgrade → 400.
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
                              .data(noAppEntity)
                              .userIdentity(HOSTED_VESPA_OPERATOR),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
                      400);
// Once the system has upgraded to the controller version, the deploy succeeds.
deploymentTester.controllerTester().upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
                              .data(noAppEntity)
                              .userIdentity(HOSTED_VESPA_OPERATOR),
                      new File("deploy-result.json"));
}
@Test
// Verifies the metering endpoint response shape by stubbing the metering client with
// a fixed current snapshot, monthly aggregates, and a three-point snapshot history.
public void testMeteringResponses() {
MockMeteringClient mockMeteringClient = tester.serviceRegistry().meteringService();
ResourceAllocation currentSnapshot = new ResourceAllocation(1, 2, 3);
ResourceAllocation thisMonth = new ResourceAllocation(12, 24, 1000);
ResourceAllocation lastMonth = new ResourceAllocation(24, 48, 2000);
ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, List.of(
        new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
        new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
        new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
mockMeteringClient.setMeteringData(new MeteringData(thisMonth, lastMonth, currentSnapshot, snapshotHistory));
// The endpoint serves metering data regardless of whether the application exists.
tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                              .userIdentity(USER_ID)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("instance1-metering.json"));
}
@Test
// Verifies that DELETE on .../deployment removes all production deployments of an
// application while leaving dev deployments intact.
public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .instances("instance1")
        .region("us-west-1")
        .region("us-east-3")
        .region("eu-west-1")
        .endpoint("eu", "default", "eu-west-1")
        .endpoint("default", "default", "us-west-1", "us-east-3")
        .build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
// Add a dev deployment alongside the three prod deployments.
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
             app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
                              .userIdentity(USER_ID)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      "{\"message\":\"All deployments removed\"}");
// Only the dev deployment survives.
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
}
@Test
// Walks the main error paths of the application/v4 API: missing tenants/applications,
// duplicate creation, invalid names, reserved tenant names, duplicate instances,
// missing/invalid packages, unsupported deploy paths, and deletion constraints.
// Statement order matters: later assertions depend on earlier state changes.
public void testErrorResponses() throws Exception {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// PUT on a non-existent tenant → 403 (no tenant to authorize against).
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                              .userIdentity(USER_ID)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                      accessDenied,
                      403);
// GETs before anything exists → 404.
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
                      404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
                      404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
                      404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
                      404);
// Create the tenant; further creations hit uniqueness and naming rules.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                              .userIdentity(USER_ID)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("tenant-without-applications.json"));
// Athenz domain already bound to tenant1 → 400.
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
                              .userIdentity(USER_ID)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
                      400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                              .userIdentity(USER_ID)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
                      400);
// Invalid tenant name (underscore) and reserved name → 400.
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
                              .userIdentity(USER_ID)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
                      400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
                              .userIdentity(USER_ID)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
                      400);
// Instance creation succeeds once, then 400 on the duplicate.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                              .userIdentity(USER_ID)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
                      400);
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
// Package retrieval: nothing submitted, unknown build number, unparsable build number.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
                      "{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
                      404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
                              .properties(Map.of("build", "42"))
                              .userIdentity(HOSTED_VESPA_OPERATOR),
                      "{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
                      404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
                              .properties(Map.of("build", "foobar"))
                              .userIdentity(HOSTED_VESPA_OPERATOR),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
                      400);
// Direct dev deploy through this (legacy) path is not supported → 400.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
                              .data(entity)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// Tenant deletion is blocked while it has active applications.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                              .userIdentity(USER_ID)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
                      400);
// Delete the instance; a second delete → 404.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
                              .userIdentity(USER_ID)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      "{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                              .userIdentity(USER_ID),
                      "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
                      404);
// Tenant delete now succeeds; without Okta tokens it is denied.
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                              .userIdentity(USER_ID)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                              .userIdentity(USER_ID),
                      accessDenied,
                      403);
// A legacy tenant name with underscores collides with its dash-normalized form.
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
                                                           new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
                              .userIdentity(USER_ID)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
                      400);
}
@Test
// Verifies authorization across tenant/application lifecycle operations: unauthenticated
// requests, users who are not Athenz domain admins, and authorized admins, for create,
// deploy, update and delete. Statement order matters: resources are created and deleted
// as the test progresses.
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity at all → 401.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                      "{\n  \"message\" : \"Not authenticated\"\n}",
                      401);
// Listing tenants is open to any authenticated user.
tester.assertResponse(request("/application/v4/tenant/", GET)
                              .userIdentity(USER_ID)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                      "[]",
                      200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Only a domain admin may create the tenant.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                              .userIdentity(unauthorizedUser),
                      "{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
                      403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                              .userIdentity(authorizedUser)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("tenant-without-applications.json"),
                      200);
// Instance creation: denied for outsiders, allowed for the tenant admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                              .userIdentity(unauthorizedUser)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      accessDenied,
                      403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                              .userIdentity(authorizedUser)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("instance-reference.json"),
                      200);
// Direct prod deploy is not permitted for plain users.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
                              .data(entity)
                              .userIdentity(USER_ID),
                      accessDenied,
                      403);
// Application deletion: denied for outsiders, allowed for the admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
                              .userIdentity(unauthorizedUser),
                      accessDenied,
                      403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
                              .userIdentity(authorizedUser)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("instance-reference-default.json"),
                      200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
                              .userIdentity(authorizedUser)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      "{\"message\":\"Deleted application tenant1.application1\"}",
                      200);
// Tenant updates follow the same admin requirement.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                              .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                              .userIdentity(unauthorizedUser),
                      accessDenied,
                      403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                              .data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
                              .userIdentity(authorizedUser)
                              .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                      new File("tenant1.json"),
                      200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                              .userIdentity(unauthorizedUser),
                      accessDenied,
                      403);
}
@Test
// Verifies the submission-time Athenz identity checks: the declared Athenz domain must
// match the tenant's domain, and the controller must be allowed to launch the declared
// service; only then does submission succeed.
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
// Declared domain differs from the tenant's domain → rejected.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .upgradePolicy("default")
        .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
        .region("us-west-1")
        .build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
                              .data(createApplicationSubmissionData(applicationPackage, 123))
                              .screwdriverIdentity(screwdriverId),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
                      400);
// Correct domain, but launch of the service is not yet allowed → rejected.
applicationPackage = new ApplicationPackageBuilder()
        .upgradePolicy("default")
        .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
        .region("us-west-1")
        .build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
                              .data(createApplicationSubmissionData(applicationPackage, 123))
                              .screwdriverIdentity(screwdriverId),
                      "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
                      400);
// Allow the launch; submission now succeeds.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
                              .data(createApplicationSubmissionData(applicationPackage, 123))
                              .screwdriverIdentity(screwdriverId),
                      "{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
// Verifies that deploying a package with an Athenz service identity to a personal
// (dev) instance requires the deploying user to be an admin of the tenant's Athenz
// domain, even when launching the service itself is already allowed.
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
// The service may be launched; the admin requirement is what is under test below.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// 'new-user' is not a domain admin yet, so the deployment is denied with 403.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
// Promote the user to domain admin ...
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
// ... after which the identical deployment request succeeds.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
// Verifies that a developer may deploy to dev when privileged: deploying a package
// that launches another domain's Athenz service requires either an explicit launch
// policy for that user, or tenant-admin membership in the service's domain. Also
// checks accepted content types for the deploy request.
@Test
public void developers_can_deploy_when_privileged() {
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
// Developer owns a separate 'sandbox' tenant and application.
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
// The package declares an identity in 'domain1', which the developer may not launch yet.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
// No launch policy for the developer: rejected with a helpful message.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
// Grant an explicit launch policy, then the deployment succeeds.
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
// A second developer who is tenant admin in both domains may deploy without an explicit policy.
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
// Raw application/zip bodies are accepted ...
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
// ... but other content types are rejected.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
// Verifies that instance and deployment responses include routing-policy data for a
// zone supporting both exclusive and shared routing, that legacy endpoints can be
// requested explicitly, and that shared endpoints are hidden when the
// HIDE_SHARED_ROUTING_ENDPOINT flag is set.
@Test
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
// An inactive policy should still be reported in the responses below.
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
// Legacy endpoints appear only when explicitly requested.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
// With the flag set, shared endpoints disappear from the response.
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} |
Simplified. | private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
} | BufferedReader bufferedReader = new BufferedReader(streamReader)) { | private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
} | class ApplicationPackageDiff {
/** Diffs {@code right} against an empty application package. */
public static String diffAgainstEmpty(ApplicationPackage right) {
    byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // a valid zip with no entries
    return diff(new ApplicationPackage(emptyZip), right);
}

/** Diffs two application packages with default size limits. */
public static String diff(ApplicationPackage left, ApplicationPackage right) {
    return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}

/**
 * Diffs two application packages file by file.
 *
 * @param maxFileSizeToDiff  files larger than this are not diffed
 * @param maxDiffSizePerFile per-file diffs larger than this are elided
 * @param maxTotalDiffSize   once the accumulated diff exceeds this, remaining files are skipped
 */
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
    if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
    Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
    Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
    StringBuilder sb = new StringBuilder();
    // Union of file names from both packages, sorted for stable output
    List<String> files = Stream.of(leftContents, rightContents)
            .flatMap(contents -> contents.keySet().stream())
            .sorted()
            .distinct()
            .collect(Collectors.toList());
    for (String file : files) {
        if (sb.length() > maxTotalDiffSize)
            sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
        else
            diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
                    .ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
    }
    return sb.length() == 0 ? "No diff\n" : sb.toString();
}

/** Diffs a single file present in either or both packages; empty if the contents are identical. */
private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
    Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
    Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
    if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
        return Optional.empty();
    // An entry whose content is absent was larger than maxFileSizeToDiff.
    // Stream.of(...).flatMap(Optional::stream).anyMatch(...) replaces the noisier
    // map(...).orElse(false) || ... formulation with the Java 9 Optional idiom.
    if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
        return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
                left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
    if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
        return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
                left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
    return LinesComparator.diff(
                    leftContent.map(c -> lines(c)).orElseGet(List::of),
                    rightContent.map(c -> lines(c)).orElseGet(List::of))
            .map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
}

private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
    return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
            .collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}

/** Heuristic: binary if any control char below 0x9 occurs, or <95% printable ASCII in the first 10k bytes. */
private static boolean isBinary(byte[] data) {
    if (data.length == 0) return false;
    int lengthToCheck = Math.min(data.length, 10000);
    int ascii = 0;
    for (int i = 0; i < lengthToCheck; i++) {
        byte b = data[i];
        if (b < 0x9) return true;
        if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
        else if (b >= 0x20 && b <= 0x7E) ascii++;
    }
    return (double) ascii / lengthToCheck < 0.95;
}
}
/** Diffs {@code right} against an empty application package; returns the diff as UTF-8 bytes. */
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
// A valid zip file containing no entries
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
/** Diffs two application packages with default size limits. */
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
// maxFileSizeToDiff: larger files are not diffed; maxDiffSizePerFile: per-file cap;
// maxTotalDiffSize: once exceeded, remaining files are skipped.
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
// Union of file names from both packages, sorted for stable output
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
// Diffs a single file; empty result means identical content.
private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
// Empty content means the entry exceeded maxFileSizeToDiff
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
// Heuristic: binary if any control char below 0x9, or <95% printable ASCII in the first 10k bytes.
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
}
Thanks, fixed. | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false)) | private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
// Body of private static Optional<String> diff(left, right, maxDiffSizePerFile)
// (the signature line is mangled into the previous row by the dataset extraction).
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
// Empty content means the entry exceeded maxFileSizeToDiff
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
// Line-based diff, elided if larger than maxDiffSizePerFile
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
} | class ApplicationPackageDiff {
/** Diffs {@code right} against an empty application package. */
public static String diffAgainstEmpty(ApplicationPackage right) {
// A valid zip file containing no entries
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
/** Diffs two application packages with default size limits. */
public static String diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
// maxFileSizeToDiff: larger files are not diffed; maxDiffSizePerFile: per-file cap;
// maxTotalDiffSize: once exceeded, remaining files are skipped.
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
// Union of file names from both packages, sorted for stable output
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return sb.length() == 0 ? "No diff\n" : sb.toString();
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
// Splits UTF-8 bytes into lines; IOException cannot occur for an in-memory stream.
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
// Heuristic: binary if any control char below 0x9, or <95% printable ASCII in the first 10k bytes.
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
} | class ApplicationPackageDiff {
/** Diffs {@code right} against an empty application package; returns the diff as UTF-8 bytes. */
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
// A valid zip file containing no entries
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
/** Diffs two application packages with default size limits. */
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
// maxFileSizeToDiff: larger files are not diffed; maxDiffSizePerFile: per-file cap;
// maxTotalDiffSize: once exceeded, remaining files are skipped.
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
// Union of file names from both packages, sorted for stable output
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
// Splits UTF-8 bytes into lines; IOException cannot occur for an in-memory stream.
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
// Heuristic: binary if any control char below 0x9, or <95% printable ASCII in the first 10k bytes.
private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
}
}
Too scared to introduce more changes in this PR :scream: | protected double maintain() {
AtomicInteger attempts = new AtomicInteger();
AtomicInteger failures = new AtomicInteger();
Version systemVersion = controller().readSystemVersion();
for (Application application : controller().applications().readable())
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values())
try {
attempts.incrementAndGet();
JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get());
if ( ! deployment.zone().environment().isManuallyDeployed()) continue;
Run last = controller().jobController().last(job).get();
Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.empty(), Optional.empty());
if ( ! deployment.version().isBefore(target.targetPlatform())) continue;
if ( controller().clock().instant().isBefore(last.start().plus(Duration.ofDays(1)))) continue;
if ( ! isLikelyNightFor(job)) continue;
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
controller().jobController().start(instance.id(), JobType.from(controller().system(), deployment.zone()).get(), target, true);
} catch (Exception e) {
failures.incrementAndGet();
log.log(Level.WARNING, "Failed upgrading " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
return asSuccessFactor(attempts.get(), failures.get());
} | if ( controller().clock().instant().isBefore(last.start().plus(Duration.ofDays(1)))) continue; | protected double maintain() {
// Body of maintain() (the signature line is mangled into the previous row):
// upgrades manually deployed deployments to the current system version, at most
// once per day per deployment, during likely local night time.
AtomicInteger attempts = new AtomicInteger();
AtomicInteger failures = new AtomicInteger();
Version systemVersion = controller().readSystemVersion();
for (Application application : controller().applications().readable())
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values())
try {
attempts.incrementAndGet();
JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get());
if ( ! deployment.zone().environment().isManuallyDeployed()) continue;
Run last = controller().jobController().last(job).get();
// Upgrade the platform only; keep the application version last deployed
Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.empty(), Optional.empty());
if ( ! deployment.version().isBefore(target.targetPlatform())) continue;
// Leave at least a day between runs of the same job
if ( controller().clock().instant().isBefore(last.start().plus(Duration.ofDays(1)))) continue;
if ( ! isLikelyNightFor(job)) continue;
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
controller().jobController().start(instance.id(), JobType.from(controller().system(), deployment.zone()).get(), target, true);
} catch (Exception e) {
failures.incrementAndGet();
log.log(Level.WARNING, "Failed upgrading " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
return asSuccessFactor(attempts.get(), failures.get());
} | class DeploymentUpgrader extends ControllerMaintainer {
/** Creates this maintainer, running at the given interval. */
public DeploymentUpgrader(Controller controller, Duration interval) {
    super(controller, interval);
}

/**
 * Returns whether it is currently likely to be night in the job's local time zone,
 * judged from the historical distribution of the job's run start hours.
 * (A stray {@code @Override} was removed here: the annotation sat on a private
 * method, which cannot override anything and therefore does not compile.)
 */
private boolean isLikelyNightFor(JobId job) {
    int hour = hourOf(controller().clock().instant());
    int[] runStarts = controller().jobController().jobStarts(job).stream()
                                  .mapToInt(DeploymentUpgrader::hourOf)
                                  .toArray();
    int localNight = mostLikelyWeeHour(runStarts);
    return Math.abs(hour - localNight) <= 1;
}

/**
 * Returns the UTC hour most likely to be during the local wee hours, given the UTC
 * hours at which runs started. Earlier starts in the array weigh more (each
 * subsequent start is scaled by 0.8), and an hour's score is lowered by nearby
 * starts more than by distant ones; the quietest window wins.
 * NOTE(review): the final {@code (best + 2) % 24} offset looks empirical — confirm intent.
 */
static int mostLikelyWeeHour(int[] starts) {
    double weight = 1;
    double[] buckets = new double[24];
    for (int start : starts)
        buckets[start] += weight *= 0.8;
    int best = -1;
    double min = Double.MAX_VALUE;
    for (int i = 12; i < 36; i++) {      // offset by 12 so (i + j) stays non-negative below
        double sum = 0;
        for (int j = -12; j < 12; j++)
            sum += buckets[(i + j) % 24] / (Math.abs(j) + 1);
        if (sum < min) {
            min = sum;
            best = i;
        }
    }
    return (best + 2) % 24;
}

/** Returns the UTC hour of the given instant. */
private static int hourOf(Instant instant) {
    return (int) (instant.toEpochMilli() / 3_600_000 % 24);
}
}
// Creates this maintainer, running at the given interval.
public DeploymentUpgrader(Controller controller, Duration interval) {
super(controller, interval);
}
// NOTE(review): this @Override looks orphaned — it sits on a private method, which
// cannot override; likely an artifact of the maintain() method being extracted away.
@Override
private boolean isLikelyNightFor(JobId job) {
int hour = hourOf(controller().clock().instant());
int[] runStarts = controller().jobController().jobStarts(job).stream()
.mapToInt(DeploymentUpgrader::hourOf)
.toArray();
// Within one hour of the estimated local night counts as night
int localNight = mostLikelyWeeHour(runStarts);
return Math.abs(hour - localNight) <= 1;
}
// Returns the UTC hour most likely to be during the local wee hours, given the UTC
// hours at which runs started; each subsequent start is down-weighted by 0.8.
static int mostLikelyWeeHour(int[] starts) {
double weight = 1;
double[] buckets = new double[24];
for (int start : starts)
buckets[start] += weight *= 0.8;
int best = -1;
double min = Double.MAX_VALUE;
// i offset by 12 so (i + j) stays non-negative for the modulo below
for (int i = 12; i < 36; i++) {
double sum = 0;
for (int j = -12; j < 12; j++)
sum += buckets[(i + j) % 24] / (Math.abs(j) + 1);
if (sum < min) {
min = sum;
best = i;
}
}
return (best + 2) % 24;
}
// Returns the UTC hour of the given instant.
private static int hourOf(Instant instant) {
return (int) (instant.toEpochMilli() / 3_600_000 % 24);
}
}
This will only get status for endpoints having rotations. Have a look at this method: https://github.com/vespa-engine/vespa/blob/a8c616b18803e2e3a959b9ba987deb64fabf252a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java#L258-L306 Try extracting a method from this and reuse it here. | private HttpResponse endpoints(Path path) {
var instanceId = instanceFrom(path);
var endpoints = controller.routing().endpointsOf(instanceId);
var zoneStatus = endpoints.asList().stream()
.flatMap(e -> e.zones().stream())
.distinct()
.collect(Collectors.toMap(
zone -> zone,
zone -> controller.routing().globalRotationStatus(new DeploymentId(instanceId, zone))
));
var slime = new Slime();
var root = slime.setObject();
var endpointsRoot = root.setArray("endpoints");
endpoints.forEach(endpoint -> {
var endpointRoot = endpointsRoot.addObject();
endpointToSlime(endpointRoot, endpoint);
var zonesRoot = endpointRoot.setArray("zones");
zoneStatus.forEach((zone, status) -> {
endpointStatusToSlime(zonesRoot.addObject(), zone, status.get(endpoint));
});
});
return new SlimeJsonResponse(slime);
} | zone -> controller.routing().globalRotationStatus(new DeploymentId(instanceId, zone)) | private HttpResponse endpoints(Path path) {
// Body of endpoints(Path) (the signature line is mangled into the previous row):
// lists global endpoints with per-zone routing status from both routing mechanisms.
var instanceId = instanceFrom(path);
var endpoints = controller.routing().endpointsOf(instanceId);
// All distinct deployments referenced by any endpoint
var deployments = endpoints.asList().stream()
.flatMap(e -> e.zones().stream())
.distinct()
.map(zoneId -> new DeploymentId(instanceId, zoneId))
.collect(Collectors.toList());
// directGlobalRoutingStatus/sharedGlobalRoutingStatus are presumably helpers
// defined elsewhere in this class — not visible in this chunk; confirm.
var deploymentsStatus = deployments.stream()
.collect(Collectors.toMap(
deploymentId -> deploymentId,
deploymentId -> Stream.concat(
directGlobalRoutingStatus(deploymentId).stream(),
sharedGlobalRoutingStatus(deploymentId).stream()
).collect(Collectors.toList())
));
var slime = new Slime();
var root = slime.setObject();
var endpointsRoot = root.setArray("endpoints");
endpoints.forEach(endpoint -> {
var endpointRoot = endpointsRoot.addObject();
endpointToSlime(endpointRoot, endpoint);
var zonesRoot = endpointRoot.setArray("zones");
// Only zones actually served by this endpoint are reported
endpoint.zones().forEach(zoneId -> {
var deploymentId = new DeploymentId(instanceId, zoneId);
deploymentsStatus.getOrDefault(deploymentId, List.of()).forEach(status -> {
deploymentStatusToSlime(zonesRoot.addObject(), deploymentId, status, endpoint.routingMethod());
});
});
});
return new SlimeJsonResponse(slime);
} | class RoutingApiHandler extends AuditLoggingRequestHandler {
private final Controller controller;
public RoutingApiHandler(Context ctx, Controller controller) {
super(ctx, controller.auditLogger());
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
}
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
var path = new Path(request.getUri());
switch (request.getMethod()) {
case GET: return get(path, request);
case POST: return post(path);
case DELETE: return delete(path);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse delete(Path path) {
if (path.matches("/routing/v1/inactive/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return setDeploymentStatus(path, true);
if (path.matches("/routing/v1/inactive/environment/{environment}/region/{region}")) return setZoneStatus(path, true);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse post(Path path) {
if (path.matches("/routing/v1/inactive/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return setDeploymentStatus(path, false);
if (path.matches("/routing/v1/inactive/environment/{environment}/region/{region}")) return setZoneStatus(path, false);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Dispatches GET requests to the handler matching the request path; 404 otherwise. */
private HttpResponse get(Path path, HttpRequest request) {
if (path.matches("/routing/v1/")) return status(request.getUri());
if (path.matches("/routing/v1/status/tenant/{tenant}")) return tenant(path, request);
if (path.matches("/routing/v1/status/tenant/{tenant}/application/{application}")) return application(path, request);
if (path.matches("/routing/v1/status/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path, request);
if (path.matches("/routing/v1/status/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path);
if (path.matches("/routing/v1/status/tenant/{tenant}/application/{application}/instance/{instance}/endpoint")) return endpoints(path);
if (path.matches("/routing/v1/status/environment")) return environment(request);
if (path.matches("/routing/v1/status/environment/{environment}/region/{region}")) return zone(path);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse environment(HttpRequest request) {
var zones = controller.zoneRegistry().zones().all().ids();
if (isRecursive(request)) {
var slime = new Slime();
var root = slime.setObject();
var zonesArray = root.setArray("zones");
for (var zone : zones) {
toSlime(zone, zonesArray.addObject());
}
return new SlimeJsonResponse(slime);
}
var resources = controller.zoneRegistry().zones().all().ids().stream()
.map(zone -> zone.environment().value() +
"/region/" + zone.region().value())
.sorted()
.collect(Collectors.toList());
return new ResourceResponse(request.getUri(), resources);
}
private HttpResponse status(URI requestUrl) {
return new ResourceResponse(requestUrl, "status/tenant", "status/environment");
}
private HttpResponse tenant(Path path, HttpRequest request) {
var tenantName = tenantFrom(path);
if (isRecursive(request)) {
var slime = new Slime();
var root = slime.setObject();
toSlime(controller.applications().asList(tenantName), null, null, root);
return new SlimeJsonResponse(slime);
}
var resources = controller.applications().asList(tenantName).stream()
.map(Application::id)
.map(TenantAndApplicationId::application)
.map(ApplicationName::value)
.map(application -> "application/" + application)
.sorted()
.collect(Collectors.toList());
return new ResourceResponse(request.getUri(), resources);
}
private HttpResponse application(Path path, HttpRequest request) {
var tenantAndApplicationId = tenantAndApplicationIdFrom(path);
if (isRecursive(request)) {
var slime = new Slime();
var root = slime.setObject();
toSlime(List.of(controller.applications().requireApplication(tenantAndApplicationId)), null,
null, root);
return new SlimeJsonResponse(slime);
}
var resources = controller.applications().requireApplication(tenantAndApplicationId).instances().keySet().stream()
.map(InstanceName::value)
.map(instance -> "instance/" + instance)
.sorted()
.collect(Collectors.toList());
return new ResourceResponse(request.getUri(), resources);
}
private HttpResponse instance(Path path, HttpRequest request) {
var instanceId = instanceFrom(path);
if (isRecursive(request)) {
var slime = new Slime();
var root = slime.setObject();
toSlime(List.of(controller.applications().requireApplication(TenantAndApplicationId.from(instanceId))),
instanceId, null, root);
return new SlimeJsonResponse(slime);
}
var resources = controller.applications().requireInstance(instanceId).deployments().keySet().stream()
.map(zone -> "environment/" + zone.environment().value() +
"/region/" + zone.region().value())
.sorted()
.collect(Collectors.toList());
return new ResourceResponse(request.getUri(), resources);
}
/**
 * Sets the global routing status for all deployments in a zone.
 *
 * Directly-routed zones are updated through routing policies; all other
 * zones go through the config server's global rotation status.
 *
 * @param in true to set status IN (receive traffic), false for OUT
 */
private HttpResponse setZoneStatus(Path path, boolean in) {
var zone = zoneFrom(path);
if (controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) {
var status = in ? GlobalRouting.Status.in : GlobalRouting.Status.out;
controller.routing().policies().setGlobalRoutingStatus(zone, status);
} else {
controller.serviceRegistry().configServer().setGlobalRotationStatus(zone, in);
}
return new MessageResponse("Set global routing status for deployments in " + zone + " to " +
(in ? "IN" : "OUT"));
}
private HttpResponse zone(Path path) {
var zone = zoneFrom(path);
var slime = new Slime();
var root = slime.setObject();
toSlime(zone, root);
return new SlimeJsonResponse(slime);
}
/**
 * Writes the routing status of a zone to the given cursor.
 *
 * Directly-routed zones report their routing-policy status (exclusive method);
 * other zones report the config server's rotation status (shared method), with
 * agent fixed to operator and changedAt set to EPOCH since the config server
 * does not expose that information here.
 */
private void toSlime(ZoneId zone, Cursor zoneObject) {
if (controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) {
var zonePolicy = controller.routing().policies().get(zone);
zoneStatusToSlime(zoneObject, zonePolicy.zone(), zonePolicy.globalRouting(), RoutingMethod.exclusive);
} else {
var in = controller.serviceRegistry().configServer().getGlobalRotationStatus(zone);
var globalRouting = new GlobalRouting(in ? GlobalRouting.Status.in : GlobalRouting.Status.out,
GlobalRouting.Agent.operator, Instant.EPOCH);
zoneStatusToSlime(zoneObject, zone, globalRouting, RoutingMethod.shared);
}
}
/**
 * Sets the global routing status of a single deployment.
 *
 * Updates the rotation status (when the zone supports shared routing) and
 * always updates the routing-policy status, so both mechanisms stay in sync.
 *
 * @param in true to set status IN (receive traffic), false for OUT
 * @throws IllegalArgumentException if the deployment does not exist
 */
private HttpResponse setDeploymentStatus(Path path, boolean in) {
var deployment = deploymentFrom(path);
var instance = controller.applications().requireInstance(deployment.applicationId());
var status = in ? GlobalRouting.Status.in : GlobalRouting.Status.out;
var agent = GlobalRouting.Agent.operator;
requireDeployment(deployment, instance);
// Rotation status only applies where a rotation can actually route to the zone.
if (rotationCanRouteTo(deployment.zoneId())) {
var endpointStatus = new EndpointStatus(in ? EndpointStatus.Status.in : EndpointStatus.Status.out, "",
agent.name(),
controller.clock().instant().getEpochSecond());
controller.routing().setGlobalRotationStatus(deployment, endpointStatus);
}
controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
return new MessageResponse("Set global routing status for " + deployment + " to " + (in ? "IN" : "OUT"));
}
private HttpResponse deployment(Path path) {
var slime = new Slime();
var root = slime.setObject();
var deploymentId = deploymentFrom(path);
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
toSlime(List.of(application), deploymentId.applicationId(), deploymentId.zoneId(), root);
return new SlimeJsonResponse(slime);
}
/**
 * Writes the routing status of the selected deployments to a "deployments" array.
 *
 * @param applications applications to include
 * @param instanceId   restrict to this instance, or null for all instances
 * @param zoneId       restrict to this zone, or null for all deployed zones
 */
private void toSlime(List<Application> applications, ApplicationId instanceId, ZoneId zoneId, Cursor root) {
var deploymentsArray = root.setArray("deployments");
for (var application : applications) {
var instances = instanceId == null
? application.instances().values()
: List.of(application.instances().get(instanceId.instance()));
for (var instance : instances) {
var zones = zoneId == null
? instance.deployments().keySet().stream().sorted(Comparator.comparing(ZoneId::value))
.collect(Collectors.toList())
: List.of(zoneId);
for (var zone : zones) {
var deploymentId = requireDeployment(new DeploymentId(instance.id(), zone), instance);
// Shared routing: report the first rotation endpoint status, if any,
// translated into the GlobalRouting model (unknown agents are tolerated).
if (rotationCanRouteTo(zone)) {
var rotationStatus = controller.routing().globalRotationStatus(deploymentId);
var endpointStatus = rotationStatus.values().stream().findFirst();
if (endpointStatus.isPresent()) {
var changedAt = Instant.ofEpochSecond(endpointStatus.get().getEpoch());
GlobalRouting.Agent agent;
try {
agent = GlobalRouting.Agent.valueOf(endpointStatus.get().getAgent());
} catch (IllegalArgumentException e) {
agent = GlobalRouting.Agent.unknown;
}
var status = endpointStatus.get().getStatus() == EndpointStatus.Status.in
? GlobalRouting.Status.in
: GlobalRouting.Status.out;
deploymentStatusToSlime(deploymentsArray.addObject(), deploymentId,
new GlobalRouting(status, agent, changedAt),
RoutingMethod.shared);
}
}
// Exclusive routing: report each routing policy that has endpoints in a
// zone supporting the exclusive routing method.
var routingPolicies = controller.routing().policies().get(deploymentId);
for (var policy : routingPolicies.values()) {
if (policy.endpoints().isEmpty()) continue;
if (!controller.zoneRegistry().routingMethods(policy.id().zone()).contains(RoutingMethod.exclusive)) continue;
deploymentStatusToSlime(deploymentsArray.addObject(), new DeploymentId(policy.id().owner(),
policy.id().zone()),
policy.status().globalRouting(), RoutingMethod.exclusive);
}
}
}
}
}
/** Returns whether a rotation can route traffic to the given zone, i.e. whether any of its routing methods is shared. */
private boolean rotationCanRouteTo(ZoneId zone) {
    for (var method : controller.zoneRegistry().routingMethods(zone)) {
        if (method.isShared()) return true;
    }
    return false;
}
private static void zoneStatusToSlime(Cursor object, ZoneId zone, GlobalRouting globalRouting, RoutingMethod method) {
object.setString("routingMethod", asString(method));
object.setString("environment", zone.environment().value());
object.setString("region", zone.region().value());
object.setString("status", asString(globalRouting.status()));
object.setString("agent", asString(globalRouting.agent()));
object.setLong("changedAt", globalRouting.changedAt().toEpochMilli());
}
private static void deploymentStatusToSlime(Cursor object, DeploymentId deployment, GlobalRouting globalRouting, RoutingMethod method) {
object.setString("routingMethod", asString(method));
object.setString("instance", deployment.applicationId().serializedForm());
object.setString("environment", deployment.zoneId().environment().value());
object.setString("region", deployment.zoneId().region().value());
object.setString("status", asString(globalRouting.status()));
object.setString("agent", asString(globalRouting.agent()));
object.setLong("changedAt", globalRouting.changedAt().toEpochMilli());
}
private static void endpointToSlime(Cursor object, Endpoint endpoint) {
object.setString("endpoint", endpoint.name());
object.setString("dns", endpoint.dnsName());
object.setString("routingMethod", endpoint.routingMethod().name());
object.setString("cluster", endpoint.cluster().value());
object.setString("scope", endpoint.scope().name());
}
private static void endpointStatusToSlime(Cursor object, ZoneId zone, EndpointStatus status) {
object.setString("zone", zone.value());
object.setString("status", status.getStatus().name());
object.setString("reason", status.getReason());
}
private TenantName tenantFrom(Path path) {
return TenantName.from(path.get("tenant"));
}
private ApplicationName applicationFrom(Path path) {
return ApplicationName.from(path.get("application"));
}
private TenantAndApplicationId tenantAndApplicationIdFrom(Path path) {
return TenantAndApplicationId.from(tenantFrom(path), applicationFrom(path));
}
private ApplicationId instanceFrom(Path path) {
return ApplicationId.from(tenantFrom(path), applicationFrom(path), InstanceName.from(path.get("instance")));
}
private DeploymentId deploymentFrom(Path path) {
return new DeploymentId(instanceFrom(path), zoneFrom(path));
}
private ZoneId zoneFrom(Path path) {
var zone = ZoneId.from(path.get("environment"), path.get("region"));
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("No such zone: " + zone);
}
return zone;
}
private static DeploymentId requireDeployment(DeploymentId deployment, Instance instance) {
if (!instance.deployments().containsKey(deployment.zoneId())) {
throw new IllegalArgumentException("No such deployment: " + deployment);
}
return deployment;
}
/** Returns whether the request asks for recursive content, i.e. has the query property recursive=true. */
private static boolean isRecursive(HttpRequest request) {
    String recursiveProperty = request.getProperty("recursive");
    return "true".equals(recursiveProperty);
}
/** Serializes a global routing status to its wire name; anything unrecognized becomes "unknown". */
private static String asString(GlobalRouting.Status status) {
    if (status == GlobalRouting.Status.in) return "in";
    if (status == GlobalRouting.Status.out) return "out";
    return "unknown";
}
private static String asString(GlobalRouting.Agent agent) {
switch (agent) {
case operator: return "operator";
case system: return "system";
case tenant: return "tenant";
default: return "unknown";
}
}
private static String asString(RoutingMethod method) {
switch (method) {
case shared: return "shared";
case exclusive: return "exclusive";
case sharedLayer4: return "sharedLayer4";
default: return "unknown";
}
}
} | class RoutingApiHandler extends AuditLoggingRequestHandler {
private final Controller controller;
public RoutingApiHandler(Context ctx, Controller controller) {
super(ctx, controller.auditLogger());
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
}
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
var path = new Path(request.getUri());
switch (request.getMethod()) {
case GET: return get(path, request);
case POST: return post(path);
case DELETE: return delete(path);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse delete(Path path) {
if (path.matches("/routing/v1/inactive/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return setDeploymentStatus(path, true);
if (path.matches("/routing/v1/inactive/environment/{environment}/region/{region}")) return setZoneStatus(path, true);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse post(Path path) {
if (path.matches("/routing/v1/inactive/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return setDeploymentStatus(path, false);
if (path.matches("/routing/v1/inactive/environment/{environment}/region/{region}")) return setZoneStatus(path, false);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse get(Path path, HttpRequest request) {
if (path.matches("/routing/v1/")) return status(request.getUri());
if (path.matches("/routing/v1/status/tenant/{tenant}")) return tenant(path, request);
if (path.matches("/routing/v1/status/tenant/{tenant}/application/{application}")) return application(path, request);
if (path.matches("/routing/v1/status/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path, request);
if (path.matches("/routing/v1/status/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path);
if (path.matches("/routing/v1/status/tenant/{tenant}/application/{application}/instance/{instance}/endpoint")) return endpoints(path);
if (path.matches("/routing/v1/status/environment")) return environment(request);
if (path.matches("/routing/v1/status/environment/{environment}/region/{region}")) return zone(path);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse environment(HttpRequest request) {
var zones = controller.zoneRegistry().zones().all().ids();
if (isRecursive(request)) {
var slime = new Slime();
var root = slime.setObject();
var zonesArray = root.setArray("zones");
for (var zone : zones) {
toSlime(zone, zonesArray.addObject());
}
return new SlimeJsonResponse(slime);
}
var resources = controller.zoneRegistry().zones().all().ids().stream()
.map(zone -> zone.environment().value() +
"/region/" + zone.region().value())
.sorted()
.collect(Collectors.toList());
return new ResourceResponse(request.getUri(), resources);
}
private HttpResponse status(URI requestUrl) {
return new ResourceResponse(requestUrl, "status/tenant", "status/environment");
}
private HttpResponse tenant(Path path, HttpRequest request) {
var tenantName = tenantFrom(path);
if (isRecursive(request)) {
var slime = new Slime();
var root = slime.setObject();
toSlime(controller.applications().asList(tenantName), null, null, root);
return new SlimeJsonResponse(slime);
}
var resources = controller.applications().asList(tenantName).stream()
.map(Application::id)
.map(TenantAndApplicationId::application)
.map(ApplicationName::value)
.map(application -> "application/" + application)
.sorted()
.collect(Collectors.toList());
return new ResourceResponse(request.getUri(), resources);
}
private HttpResponse application(Path path, HttpRequest request) {
var tenantAndApplicationId = tenantAndApplicationIdFrom(path);
if (isRecursive(request)) {
var slime = new Slime();
var root = slime.setObject();
toSlime(List.of(controller.applications().requireApplication(tenantAndApplicationId)), null,
null, root);
return new SlimeJsonResponse(slime);
}
var resources = controller.applications().requireApplication(tenantAndApplicationId).instances().keySet().stream()
.map(InstanceName::value)
.map(instance -> "instance/" + instance)
.sorted()
.collect(Collectors.toList());
return new ResourceResponse(request.getUri(), resources);
}
private HttpResponse instance(Path path, HttpRequest request) {
var instanceId = instanceFrom(path);
if (isRecursive(request)) {
var slime = new Slime();
var root = slime.setObject();
toSlime(List.of(controller.applications().requireApplication(TenantAndApplicationId.from(instanceId))),
instanceId, null, root);
return new SlimeJsonResponse(slime);
}
var resources = controller.applications().requireInstance(instanceId).deployments().keySet().stream()
.map(zone -> "environment/" + zone.environment().value() +
"/region/" + zone.region().value())
.sorted()
.collect(Collectors.toList());
return new ResourceResponse(request.getUri(), resources);
}
private HttpResponse setZoneStatus(Path path, boolean in) {
var zone = zoneFrom(path);
if (controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) {
var status = in ? GlobalRouting.Status.in : GlobalRouting.Status.out;
controller.routing().policies().setGlobalRoutingStatus(zone, status);
} else {
controller.serviceRegistry().configServer().setGlobalRotationStatus(zone, in);
}
return new MessageResponse("Set global routing status for deployments in " + zone + " to " +
(in ? "IN" : "OUT"));
}
private HttpResponse zone(Path path) {
var zone = zoneFrom(path);
var slime = new Slime();
var root = slime.setObject();
toSlime(zone, root);
return new SlimeJsonResponse(slime);
}
private void toSlime(ZoneId zone, Cursor zoneObject) {
if (controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) {
var zonePolicy = controller.routing().policies().get(zone);
zoneStatusToSlime(zoneObject, zonePolicy.zone(), zonePolicy.globalRouting(), RoutingMethod.exclusive);
} else {
var in = controller.serviceRegistry().configServer().getGlobalRotationStatus(zone);
var globalRouting = new GlobalRouting(in ? GlobalRouting.Status.in : GlobalRouting.Status.out,
GlobalRouting.Agent.operator, Instant.EPOCH);
zoneStatusToSlime(zoneObject, zone, globalRouting, RoutingMethod.shared);
}
}
private HttpResponse setDeploymentStatus(Path path, boolean in) {
var deployment = deploymentFrom(path);
var instance = controller.applications().requireInstance(deployment.applicationId());
var status = in ? GlobalRouting.Status.in : GlobalRouting.Status.out;
var agent = GlobalRouting.Agent.operator;
requireDeployment(deployment, instance);
if (rotationCanRouteTo(deployment.zoneId())) {
var endpointStatus = new EndpointStatus(in ? EndpointStatus.Status.in : EndpointStatus.Status.out, "",
agent.name(),
controller.clock().instant().getEpochSecond());
controller.routing().setGlobalRotationStatus(deployment, endpointStatus);
}
controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
return new MessageResponse("Set global routing status for " + deployment + " to " + (in ? "IN" : "OUT"));
}
private HttpResponse deployment(Path path) {
var slime = new Slime();
var root = slime.setObject();
var deploymentId = deploymentFrom(path);
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
toSlime(List.of(application), deploymentId.applicationId(), deploymentId.zoneId(), root);
return new SlimeJsonResponse(slime);
}
private void toSlime(List<Application> applications, ApplicationId instanceId, ZoneId zoneId, Cursor root) {
var deploymentsArray = root.setArray("deployments");
for (var application : applications) {
var instances = instanceId == null
? application.instances().values()
: List.of(application.instances().get(instanceId.instance()));
for (var instance : instances) {
var zones = zoneId == null
? instance.deployments().keySet().stream().sorted(Comparator.comparing(ZoneId::value))
.collect(Collectors.toList())
: List.of(zoneId);
for (var zone : zones) {
var deploymentId = requireDeployment(new DeploymentId(instance.id(), zone), instance);
sharedGlobalRoutingStatus(deploymentId).ifPresent(status -> {
deploymentStatusToSlime(deploymentsArray.addObject(), deploymentId, status, RoutingMethod.shared);
});
directGlobalRoutingStatus(deploymentId).forEach(status -> {
deploymentStatusToSlime(deploymentsArray.addObject(), deploymentId, status, RoutingMethod.exclusive);
});
}
}
}
}
/**
 * Returns the global routing status of a deployment as seen through shared
 * routing (rotations), or empty if the zone has no shared routing method or
 * no rotation endpoint status exists.
 *
 * Only the first endpoint status is considered; unknown agent names are
 * mapped to {@code GlobalRouting.Agent.unknown} rather than failing.
 */
private Optional<GlobalRouting> sharedGlobalRoutingStatus(DeploymentId deploymentId) {
if (rotationCanRouteTo(deploymentId.zoneId())) {
var rotationStatus = controller.routing().globalRotationStatus(deploymentId);
var endpointStatus = rotationStatus.values().stream().findFirst();
if (endpointStatus.isPresent()) {
var changedAt = Instant.ofEpochSecond(endpointStatus.get().getEpoch());
GlobalRouting.Agent agent;
try {
agent = GlobalRouting.Agent.valueOf(endpointStatus.get().getAgent());
} catch (IllegalArgumentException e) {
agent = GlobalRouting.Agent.unknown;
}
var status = endpointStatus.get().getStatus() == EndpointStatus.Status.in
? GlobalRouting.Status.in
: GlobalRouting.Status.out;
return Optional.of(new GlobalRouting(status, agent, changedAt));
}
}
return Optional.empty();
}
/**
 * Returns the global routing statuses of a deployment as seen through direct
 * (exclusive) routing policies: one entry per policy that has endpoints in a
 * zone supporting the exclusive routing method. May be empty.
 */
private List<GlobalRouting> directGlobalRoutingStatus(DeploymentId deploymentId) {
return controller.routing().policies().get(deploymentId).values().stream()
.filter(p -> ! p.endpoints().isEmpty())
.filter(p -> controller.zoneRegistry().routingMethods(p.id().zone()).contains(RoutingMethod.exclusive))
.map(p -> p.status().globalRouting())
.collect(Collectors.toList());
}
/** Returns whether a rotation can route traffic to given zone */
private boolean rotationCanRouteTo(ZoneId zone) {
return controller.zoneRegistry().routingMethods(zone).stream().anyMatch(RoutingMethod::isShared);
}
private static void zoneStatusToSlime(Cursor object, ZoneId zone, GlobalRouting globalRouting, RoutingMethod method) {
object.setString("routingMethod", asString(method));
object.setString("environment", zone.environment().value());
object.setString("region", zone.region().value());
object.setString("status", asString(globalRouting.status()));
object.setString("agent", asString(globalRouting.agent()));
object.setLong("changedAt", globalRouting.changedAt().toEpochMilli());
}
private static void deploymentStatusToSlime(Cursor object, DeploymentId deployment, GlobalRouting globalRouting, RoutingMethod method) {
object.setString("routingMethod", asString(method));
object.setString("instance", deployment.applicationId().serializedForm());
object.setString("environment", deployment.zoneId().environment().value());
object.setString("region", deployment.zoneId().region().value());
object.setString("status", asString(globalRouting.status()));
object.setString("agent", asString(globalRouting.agent()));
object.setLong("changedAt", globalRouting.changedAt().toEpochMilli());
}
private static void endpointToSlime(Cursor object, Endpoint endpoint) {
object.setString("name", endpoint.name());
object.setString("dnsName", endpoint.dnsName());
object.setString("routingMethod", endpoint.routingMethod().name());
object.setString("cluster", endpoint.cluster().value());
object.setString("scope", endpoint.scope().name());
}
private TenantName tenantFrom(Path path) {
return TenantName.from(path.get("tenant"));
}
private ApplicationName applicationFrom(Path path) {
return ApplicationName.from(path.get("application"));
}
private TenantAndApplicationId tenantAndApplicationIdFrom(Path path) {
return TenantAndApplicationId.from(tenantFrom(path), applicationFrom(path));
}
private ApplicationId instanceFrom(Path path) {
return ApplicationId.from(tenantFrom(path), applicationFrom(path), InstanceName.from(path.get("instance")));
}
private DeploymentId deploymentFrom(Path path) {
return new DeploymentId(instanceFrom(path), zoneFrom(path));
}
private ZoneId zoneFrom(Path path) {
var zone = ZoneId.from(path.get("environment"), path.get("region"));
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("No such zone: " + zone);
}
return zone;
}
private static DeploymentId requireDeployment(DeploymentId deployment, Instance instance) {
if (!instance.deployments().containsKey(deployment.zoneId())) {
throw new IllegalArgumentException("No such deployment: " + deployment);
}
return deployment;
}
private static boolean isRecursive(HttpRequest request) {
return "true".equals(request.getProperty("recursive"));
}
private static String asString(GlobalRouting.Status status) {
switch (status) {
case in: return "in";
case out: return "out";
default: return "unknown";
}
}
private static String asString(GlobalRouting.Agent agent) {
switch (agent) {
case operator: return "operator";
case system: return "system";
case tenant: return "tenant";
default: return "unknown";
}
}
private static String asString(RoutingMethod method) {
switch (method) {
case shared: return "shared";
case exclusive: return "exclusive";
case sharedLayer4: return "sharedLayer4";
default: return "unknown";
}
}
} |
This will return even if wantedState is SUSPENDED, the host is suspended, and a child is resumed. Which is one of the use-cases you're trying to fix? Case B: If the wanted state is RESUMED and !hostIsActiveInNR, then I believe isSuspended() is true since it is UNKNOWN and != NO_REMARKS. As it is now, it would mean printing "State changed from ... to ..." every tick. Instead we'd like to return here. | public void converge(State wantedState) {
// Converges node-admin and its agents toward the wanted state with respect to
// freezing, the Orchestrator, and running services.
NodeSpec node = nodeRepository.getNode(hostHostname);
// Only talk to the Orchestrator about the host itself while the host is active in the node repository.
boolean hostIsActiveInNR = node.state() == NodeState.active;
if (wantedState == RESUMED) {
adjustNodeAgentsToRunFromNodeRepository();
} else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
// Freezing has been stuck longer than the timeout: roll back (unfreeze and resume)
// and abort this tick so the next one runs unfrozen.
adjustNodeAgentsToRunFromNodeRepository();
nodeAdmin.setFrozen(false);
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick");
}
boolean wantFrozen = wantedState != RESUMED;
// NOTE(review): this early return checks only the HOST's orchestrator status, so it can
// return while wantedState is SUSPENDED even though a child node is still resumed; and if
// the host's suspension status is UNKNOWN the condition may never hold, making the
// "State changed" log below fire every tick — confirm intended semantics.
if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return;
currentState = TRANSITIONING;
if (!nodeAdmin.setFrozen(wantFrozen))
throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
switch (wantedState) {
case RESUMED:
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
break;
case SUSPENDED_NODE_ADMIN:
if (hostIsActiveInNR) orchestrator.suspend(hostHostname);
break;
case SUSPENDED:
// Ask the Orchestrator for permission to suspend all active children (plus the
// host itself, when active) in one batch, then stop node agent services.
List<String> nodesInActiveState = getNodesInActiveState();
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
if (hostIsActiveInNR) nodesToSuspend.add(hostHostname);
if (!nodesToSuspend.isEmpty()) {
orchestrator.suspend(hostHostname, nodesToSuspend);
log.info("Orchestrator allows suspension of " + nodesToSuspend);
}
nodeAdmin.stopNodeAgentServices();
break;
default:
throw new IllegalStateException("Unknown wanted state " + wantedState);
}
// currentState is TRANSITIONING here, so this always logs "from TRANSITIONING".
log.info("State changed from " + currentState + " to " + wantedState);
currentState = wantedState;
} | if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return; | public void converge(State wantedState) {
// Converges node-admin and its agents toward the wanted state with respect to
// freezing, the Orchestrator, and running services.
NodeSpec node = nodeRepository.getNode(hostHostname);
// Only talk to the Orchestrator about the host itself while the host is active in the node repository.
boolean hostIsActiveInNR = node.state() == NodeState.active;
if (wantedState == RESUMED) {
adjustNodeAgentsToRunFromNodeRepository();
} else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
// Freezing has been stuck longer than the timeout: roll back (unfreeze and resume)
// and abort this tick so the next one runs unfrozen.
adjustNodeAgentsToRunFromNodeRepository();
nodeAdmin.setFrozen(false);
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick");
}
boolean wantFrozen = wantedState != RESUMED;
// NOTE(review): this early return checks only the HOST's orchestrator status, so it can
// return while wantedState is SUSPENDED even though a child node is still resumed; and if
// the host's suspension status is UNKNOWN the condition may never hold, making the
// "State changed" log below fire every tick — confirm intended semantics.
if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return;
currentState = TRANSITIONING;
if (!nodeAdmin.setFrozen(wantFrozen))
throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
switch (wantedState) {
case RESUMED:
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
break;
case SUSPENDED_NODE_ADMIN:
if (hostIsActiveInNR) orchestrator.suspend(hostHostname);
break;
case SUSPENDED:
// Ask the Orchestrator for permission to suspend all active children (plus the
// host itself, when active) in one batch, then stop node agent services.
List<String> nodesInActiveState = getNodesInActiveState();
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
if (hostIsActiveInNR) nodesToSuspend.add(hostHostname);
if (!nodesToSuspend.isEmpty()) {
orchestrator.suspend(hostHostname, nodesToSuspend);
log.info("Orchestrator allows suspension of " + nodesToSuspend);
}
nodeAdmin.stopNodeAgentServices();
break;
default:
throw new IllegalStateException("Unknown wanted state " + wantedState);
}
// currentState is TRANSITIONING here, so this always logs "from TRANSITIONING".
log.info("State changed from " + currentState + " to " + wantedState);
currentState = wantedState;
} | class NodeAdminStateUpdater {
private static final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
private static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentContextFactory nodeAgentContextFactory;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final NodeAdmin nodeAdmin;
private final String hostHostname;
public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }
private volatile State currentState = SUSPENDED_NODE_ADMIN;
public NodeAdminStateUpdater(
NodeAgentContextFactory nodeAgentContextFactory,
NodeRepository nodeRepository,
Orchestrator orchestrator,
NodeAdmin nodeAdmin,
HostName hostHostname,
Clock clock,
FlagSource flagSource) {
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.nodeAdmin = nodeAdmin;
this.hostHostname = hostHostname.value();
}
public void start() {
nodeAdmin.start();
EnumSet<State> suspendedStates = EnumSet.of(SUSPENDED_NODE_ADMIN, SUSPENDED);
metricsScheduler.scheduleAtFixedRate(() -> {
try {
nodeAdmin.updateMetrics(suspendedStates.contains(currentState));
} catch (Throwable e) {
log.log(Level.WARNING, "Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
}
public void stop() {
metricsScheduler.shutdown();
nodeAdmin.stop();
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
log.info("Was interrupted while waiting for metricsScheduler and shutdown");
}
} while (!metricsScheduler.isTerminated());
}
/**
* This method attempts to converge node-admin w/agents to a {@link State}
* with respect to: freeze, Orchestrator, and services running.
*/
void adjustNodeAgentsToRunFromNodeRepository() {
try {
Map<String, Acl> aclByHostname = nodeRepository.getAcls(hostHostname);
Set<NodeAgentContext> nodeAgentContexts = nodeRepository.getNodes(hostHostname).stream()
.map(node -> nodeAgentContextFactory.create(node, aclByHostname.getOrDefault(node.hostname(), Acl.EMPTY)))
.collect(Collectors.toSet());
nodeAdmin.refreshContainersToRun(nodeAgentContexts);
} catch (ConvergenceException e) {
log.log(Level.WARNING, "Failed to update which containers should be running: " + Exceptions.toMessageString(e));
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to update which containers should be running", e);
}
}
private List<String> getNodesInActiveState() {
return nodeRepository.getNodes(hostHostname)
.stream()
.filter(node -> node.state() == NodeState.active)
.map(NodeSpec::hostname)
.collect(Collectors.toList());
}
} | class NodeAdminStateUpdater {
private static final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
private static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentContextFactory nodeAgentContextFactory;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final NodeAdmin nodeAdmin;
private final String hostHostname;
public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }
private volatile State currentState = SUSPENDED_NODE_ADMIN;
public NodeAdminStateUpdater(
NodeAgentContextFactory nodeAgentContextFactory,
NodeRepository nodeRepository,
Orchestrator orchestrator,
NodeAdmin nodeAdmin,
HostName hostHostname,
Clock clock,
FlagSource flagSource) {
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.nodeAdmin = nodeAdmin;
this.hostHostname = hostHostname.value();
}
public void start() {
nodeAdmin.start();
EnumSet<State> suspendedStates = EnumSet.of(SUSPENDED_NODE_ADMIN, SUSPENDED);
metricsScheduler.scheduleAtFixedRate(() -> {
try {
nodeAdmin.updateMetrics(suspendedStates.contains(currentState));
} catch (Throwable e) {
log.log(Level.WARNING, "Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
}
public void stop() {
metricsScheduler.shutdown();
nodeAdmin.stop();
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
log.info("Was interrupted while waiting for metricsScheduler and shutdown");
}
} while (!metricsScheduler.isTerminated());
}
/**
* This method attempts to converge node-admin w/agents to a {@link State}
* with respect to: freeze, Orchestrator, and services running.
*/
void adjustNodeAgentsToRunFromNodeRepository() {
try {
Map<String, Acl> aclByHostname = nodeRepository.getAcls(hostHostname);
Set<NodeAgentContext> nodeAgentContexts = nodeRepository.getNodes(hostHostname).stream()
.map(node -> nodeAgentContextFactory.create(node, aclByHostname.getOrDefault(node.hostname(), Acl.EMPTY)))
.collect(Collectors.toSet());
nodeAdmin.refreshContainersToRun(nodeAgentContexts);
} catch (ConvergenceException e) {
log.log(Level.WARNING, "Failed to update which containers should be running: " + Exceptions.toMessageString(e));
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to update which containers should be running", e);
}
}
private List<String> getNodesInActiveState() {
return nodeRepository.getNodes(hostHostname)
.stream()
.filter(node -> node.state() == NodeState.active)
.map(NodeSpec::hostname)
.collect(Collectors.toList());
}
} |
The main use-case is when HA converges before switching to real, typically it is resuming after updating HA. After it switches, it wont resume against real and the upgrade halts. Case B: As far as I can see from the code, `orchestratorStatus` is not set in node response if the node is not allocated, that is also the case for `NO_REMARKS`, so it should be `NO_REMARKS` in `NodeSpec::orchestratorStatus`? | public void converge(State wantedState) {
NodeSpec node = nodeRepository.getNode(hostHostname);
boolean hostIsActiveInNR = node.state() == NodeState.active;
if (wantedState == RESUMED) {
adjustNodeAgentsToRunFromNodeRepository();
} else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
adjustNodeAgentsToRunFromNodeRepository();
nodeAdmin.setFrozen(false);
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick");
}
boolean wantFrozen = wantedState != RESUMED;
if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return;
currentState = TRANSITIONING;
if (!nodeAdmin.setFrozen(wantFrozen))
throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
switch (wantedState) {
case RESUMED:
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
break;
case SUSPENDED_NODE_ADMIN:
if (hostIsActiveInNR) orchestrator.suspend(hostHostname);
break;
case SUSPENDED:
List<String> nodesInActiveState = getNodesInActiveState();
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
if (hostIsActiveInNR) nodesToSuspend.add(hostHostname);
if (!nodesToSuspend.isEmpty()) {
orchestrator.suspend(hostHostname, nodesToSuspend);
log.info("Orchestrator allows suspension of " + nodesToSuspend);
}
nodeAdmin.stopNodeAgentServices();
break;
default:
throw new IllegalStateException("Unknown wanted state " + wantedState);
}
log.info("State changed from " + currentState + " to " + wantedState);
currentState = wantedState;
} | if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return; | public void converge(State wantedState) {
NodeSpec node = nodeRepository.getNode(hostHostname);
boolean hostIsActiveInNR = node.state() == NodeState.active;
if (wantedState == RESUMED) {
adjustNodeAgentsToRunFromNodeRepository();
} else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
adjustNodeAgentsToRunFromNodeRepository();
nodeAdmin.setFrozen(false);
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick");
}
boolean wantFrozen = wantedState != RESUMED;
if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return;
currentState = TRANSITIONING;
if (!nodeAdmin.setFrozen(wantFrozen))
throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
switch (wantedState) {
case RESUMED:
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
break;
case SUSPENDED_NODE_ADMIN:
if (hostIsActiveInNR) orchestrator.suspend(hostHostname);
break;
case SUSPENDED:
List<String> nodesInActiveState = getNodesInActiveState();
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
if (hostIsActiveInNR) nodesToSuspend.add(hostHostname);
if (!nodesToSuspend.isEmpty()) {
orchestrator.suspend(hostHostname, nodesToSuspend);
log.info("Orchestrator allows suspension of " + nodesToSuspend);
}
nodeAdmin.stopNodeAgentServices();
break;
default:
throw new IllegalStateException("Unknown wanted state " + wantedState);
}
log.info("State changed from " + currentState + " to " + wantedState);
currentState = wantedState;
} | class NodeAdminStateUpdater {
private static final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
private static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentContextFactory nodeAgentContextFactory;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final NodeAdmin nodeAdmin;
private final String hostHostname;
public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }
private volatile State currentState = SUSPENDED_NODE_ADMIN;
public NodeAdminStateUpdater(
NodeAgentContextFactory nodeAgentContextFactory,
NodeRepository nodeRepository,
Orchestrator orchestrator,
NodeAdmin nodeAdmin,
HostName hostHostname,
Clock clock,
FlagSource flagSource) {
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.nodeAdmin = nodeAdmin;
this.hostHostname = hostHostname.value();
}
public void start() {
nodeAdmin.start();
EnumSet<State> suspendedStates = EnumSet.of(SUSPENDED_NODE_ADMIN, SUSPENDED);
metricsScheduler.scheduleAtFixedRate(() -> {
try {
nodeAdmin.updateMetrics(suspendedStates.contains(currentState));
} catch (Throwable e) {
log.log(Level.WARNING, "Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
}
public void stop() {
metricsScheduler.shutdown();
nodeAdmin.stop();
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
log.info("Was interrupted while waiting for metricsScheduler and shutdown");
}
} while (!metricsScheduler.isTerminated());
}
/**
* This method attempts to converge node-admin w/agents to a {@link State}
* with respect to: freeze, Orchestrator, and services running.
*/
void adjustNodeAgentsToRunFromNodeRepository() {
try {
Map<String, Acl> aclByHostname = nodeRepository.getAcls(hostHostname);
Set<NodeAgentContext> nodeAgentContexts = nodeRepository.getNodes(hostHostname).stream()
.map(node -> nodeAgentContextFactory.create(node, aclByHostname.getOrDefault(node.hostname(), Acl.EMPTY)))
.collect(Collectors.toSet());
nodeAdmin.refreshContainersToRun(nodeAgentContexts);
} catch (ConvergenceException e) {
log.log(Level.WARNING, "Failed to update which containers should be running: " + Exceptions.toMessageString(e));
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to update which containers should be running", e);
}
}
private List<String> getNodesInActiveState() {
return nodeRepository.getNodes(hostHostname)
.stream()
.filter(node -> node.state() == NodeState.active)
.map(NodeSpec::hostname)
.collect(Collectors.toList());
}
} | class NodeAdminStateUpdater {
private static final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
private static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentContextFactory nodeAgentContextFactory;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final NodeAdmin nodeAdmin;
private final String hostHostname;
public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }
private volatile State currentState = SUSPENDED_NODE_ADMIN;
public NodeAdminStateUpdater(
NodeAgentContextFactory nodeAgentContextFactory,
NodeRepository nodeRepository,
Orchestrator orchestrator,
NodeAdmin nodeAdmin,
HostName hostHostname,
Clock clock,
FlagSource flagSource) {
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.nodeAdmin = nodeAdmin;
this.hostHostname = hostHostname.value();
}
public void start() {
nodeAdmin.start();
EnumSet<State> suspendedStates = EnumSet.of(SUSPENDED_NODE_ADMIN, SUSPENDED);
metricsScheduler.scheduleAtFixedRate(() -> {
try {
nodeAdmin.updateMetrics(suspendedStates.contains(currentState));
} catch (Throwable e) {
log.log(Level.WARNING, "Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
}
public void stop() {
metricsScheduler.shutdown();
nodeAdmin.stop();
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
log.info("Was interrupted while waiting for metricsScheduler and shutdown");
}
} while (!metricsScheduler.isTerminated());
}
/**
* This method attempts to converge node-admin w/agents to a {@link State}
* with respect to: freeze, Orchestrator, and services running.
*/
void adjustNodeAgentsToRunFromNodeRepository() {
try {
Map<String, Acl> aclByHostname = nodeRepository.getAcls(hostHostname);
Set<NodeAgentContext> nodeAgentContexts = nodeRepository.getNodes(hostHostname).stream()
.map(node -> nodeAgentContextFactory.create(node, aclByHostname.getOrDefault(node.hostname(), Acl.EMPTY)))
.collect(Collectors.toSet());
nodeAdmin.refreshContainersToRun(nodeAgentContexts);
} catch (ConvergenceException e) {
log.log(Level.WARNING, "Failed to update which containers should be running: " + Exceptions.toMessageString(e));
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to update which containers should be running", e);
}
}
private List<String> getNodesInActiveState() {
return nodeRepository.getNodes(hostHostname)
.stream()
.filter(node -> node.state() == NodeState.active)
.map(NodeSpec::hostname)
.collect(Collectors.toList());
}
} |
Right, but you also mentioned operators: So if an operator forces resume of a child. Not a big deal. But the logic is a bit harder to follow since it doesn't match the below. Case B: Agree. | public void converge(State wantedState) {
NodeSpec node = nodeRepository.getNode(hostHostname);
boolean hostIsActiveInNR = node.state() == NodeState.active;
if (wantedState == RESUMED) {
adjustNodeAgentsToRunFromNodeRepository();
} else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
adjustNodeAgentsToRunFromNodeRepository();
nodeAdmin.setFrozen(false);
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick");
}
boolean wantFrozen = wantedState != RESUMED;
if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return;
currentState = TRANSITIONING;
if (!nodeAdmin.setFrozen(wantFrozen))
throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
switch (wantedState) {
case RESUMED:
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
break;
case SUSPENDED_NODE_ADMIN:
if (hostIsActiveInNR) orchestrator.suspend(hostHostname);
break;
case SUSPENDED:
List<String> nodesInActiveState = getNodesInActiveState();
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
if (hostIsActiveInNR) nodesToSuspend.add(hostHostname);
if (!nodesToSuspend.isEmpty()) {
orchestrator.suspend(hostHostname, nodesToSuspend);
log.info("Orchestrator allows suspension of " + nodesToSuspend);
}
nodeAdmin.stopNodeAgentServices();
break;
default:
throw new IllegalStateException("Unknown wanted state " + wantedState);
}
log.info("State changed from " + currentState + " to " + wantedState);
currentState = wantedState;
} | if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return; | public void converge(State wantedState) {
NodeSpec node = nodeRepository.getNode(hostHostname);
boolean hostIsActiveInNR = node.state() == NodeState.active;
if (wantedState == RESUMED) {
adjustNodeAgentsToRunFromNodeRepository();
} else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
adjustNodeAgentsToRunFromNodeRepository();
nodeAdmin.setFrozen(false);
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick");
}
boolean wantFrozen = wantedState != RESUMED;
if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return;
currentState = TRANSITIONING;
if (!nodeAdmin.setFrozen(wantFrozen))
throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
switch (wantedState) {
case RESUMED:
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
break;
case SUSPENDED_NODE_ADMIN:
if (hostIsActiveInNR) orchestrator.suspend(hostHostname);
break;
case SUSPENDED:
List<String> nodesInActiveState = getNodesInActiveState();
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
if (hostIsActiveInNR) nodesToSuspend.add(hostHostname);
if (!nodesToSuspend.isEmpty()) {
orchestrator.suspend(hostHostname, nodesToSuspend);
log.info("Orchestrator allows suspension of " + nodesToSuspend);
}
nodeAdmin.stopNodeAgentServices();
break;
default:
throw new IllegalStateException("Unknown wanted state " + wantedState);
}
log.info("State changed from " + currentState + " to " + wantedState);
currentState = wantedState;
} | class NodeAdminStateUpdater {
private static final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
private static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentContextFactory nodeAgentContextFactory;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final NodeAdmin nodeAdmin;
private final String hostHostname;
public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }
private volatile State currentState = SUSPENDED_NODE_ADMIN;
public NodeAdminStateUpdater(
NodeAgentContextFactory nodeAgentContextFactory,
NodeRepository nodeRepository,
Orchestrator orchestrator,
NodeAdmin nodeAdmin,
HostName hostHostname,
Clock clock,
FlagSource flagSource) {
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.nodeAdmin = nodeAdmin;
this.hostHostname = hostHostname.value();
}
public void start() {
nodeAdmin.start();
EnumSet<State> suspendedStates = EnumSet.of(SUSPENDED_NODE_ADMIN, SUSPENDED);
metricsScheduler.scheduleAtFixedRate(() -> {
try {
nodeAdmin.updateMetrics(suspendedStates.contains(currentState));
} catch (Throwable e) {
log.log(Level.WARNING, "Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
}
public void stop() {
metricsScheduler.shutdown();
nodeAdmin.stop();
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
log.info("Was interrupted while waiting for metricsScheduler and shutdown");
}
} while (!metricsScheduler.isTerminated());
}
/**
* This method attempts to converge node-admin w/agents to a {@link State}
* with respect to: freeze, Orchestrator, and services running.
*/
void adjustNodeAgentsToRunFromNodeRepository() {
try {
Map<String, Acl> aclByHostname = nodeRepository.getAcls(hostHostname);
Set<NodeAgentContext> nodeAgentContexts = nodeRepository.getNodes(hostHostname).stream()
.map(node -> nodeAgentContextFactory.create(node, aclByHostname.getOrDefault(node.hostname(), Acl.EMPTY)))
.collect(Collectors.toSet());
nodeAdmin.refreshContainersToRun(nodeAgentContexts);
} catch (ConvergenceException e) {
log.log(Level.WARNING, "Failed to update which containers should be running: " + Exceptions.toMessageString(e));
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to update which containers should be running", e);
}
}
private List<String> getNodesInActiveState() {
return nodeRepository.getNodes(hostHostname)
.stream()
.filter(node -> node.state() == NodeState.active)
.map(NodeSpec::hostname)
.collect(Collectors.toList());
}
} | class NodeAdminStateUpdater {
private static final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
private static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
private final NodeAgentContextFactory nodeAgentContextFactory;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final NodeAdmin nodeAdmin;
private final String hostHostname;
public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }
private volatile State currentState = SUSPENDED_NODE_ADMIN;
public NodeAdminStateUpdater(
NodeAgentContextFactory nodeAgentContextFactory,
NodeRepository nodeRepository,
Orchestrator orchestrator,
NodeAdmin nodeAdmin,
HostName hostHostname,
Clock clock,
FlagSource flagSource) {
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.nodeAdmin = nodeAdmin;
this.hostHostname = hostHostname.value();
}
public void start() {
nodeAdmin.start();
EnumSet<State> suspendedStates = EnumSet.of(SUSPENDED_NODE_ADMIN, SUSPENDED);
metricsScheduler.scheduleAtFixedRate(() -> {
try {
nodeAdmin.updateMetrics(suspendedStates.contains(currentState));
} catch (Throwable e) {
log.log(Level.WARNING, "Metric fetcher scheduler failed", e);
}
}, 10, 55, TimeUnit.SECONDS);
}
public void stop() {
metricsScheduler.shutdown();
nodeAdmin.stop();
do {
try {
metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
log.info("Was interrupted while waiting for metricsScheduler and shutdown");
}
} while (!metricsScheduler.isTerminated());
}
/**
* This method attempts to converge node-admin w/agents to a {@link State}
* with respect to: freeze, Orchestrator, and services running.
*/
void adjustNodeAgentsToRunFromNodeRepository() {
try {
Map<String, Acl> aclByHostname = nodeRepository.getAcls(hostHostname);
Set<NodeAgentContext> nodeAgentContexts = nodeRepository.getNodes(hostHostname).stream()
.map(node -> nodeAgentContextFactory.create(node, aclByHostname.getOrDefault(node.hostname(), Acl.EMPTY)))
.collect(Collectors.toSet());
nodeAdmin.refreshContainersToRun(nodeAgentContexts);
} catch (ConvergenceException e) {
log.log(Level.WARNING, "Failed to update which containers should be running: " + Exceptions.toMessageString(e));
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to update which containers should be running", e);
}
}
private List<String> getNodesInActiveState() {
return nodeRepository.getNodes(hostHostname)
.stream()
.filter(node -> node.state() == NodeState.active)
.map(NodeSpec::hostname)
.collect(Collectors.toList());
}
} |
What about the distributor!? :) | private StorageNode buildSingleNode(DeployState deployState, ContentCluster parent) {
int distributionKey = 0;
StorageNode sNode = new StorageNode(deployState.getProperties(), parent.getStorageCluster(), 1.0, distributionKey , false);
sNode.setHostResource(parent.hostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC));
sNode.initService(deployLogger);
PersistenceEngine provider = parent.getPersistence().create(deployState, sNode, storageGroup, null);
new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider);
return sNode;
} | sNode.initService(deployLogger); | private StorageNode buildSingleNode(DeployState deployState, ContentCluster parent) {
int distributionKey = 0;
StorageNode searchNode = new StorageNode(deployState.getProperties(), parent.getStorageCluster(), 1.0, distributionKey , false);
searchNode.setHostResource(parent.hostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC));
PersistenceEngine provider = parent.getPersistence().create(deployState, searchNode, storageGroup, null);
searchNode.initService(deployLogger);
Distributor distributor = new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider);
distributor.setHostResource(searchNode.getHostResource());
distributor.initService(deployLogger);
return searchNode;
} | class GroupBuilder {
private final StorageGroup storageGroup;
/* The explicitly defined subgroups of this */
private final List<GroupBuilder> subGroups;
private final List<XmlNodeBuilder> nodeBuilders;
/** The nodes explicitly specified as a nodes tag in this group, or empty if none */
private final Optional<NodesSpecification> nodeRequirement;
private final DeployLogger deployLogger;
private GroupBuilder(StorageGroup storageGroup, List<GroupBuilder> subGroups, List<XmlNodeBuilder> nodeBuilders,
Optional<NodesSpecification> nodeRequirement, DeployLogger deployLogger) {
this.storageGroup = storageGroup;
this.subGroups = subGroups;
this.nodeBuilders = nodeBuilders;
this.nodeRequirement = nodeRequirement;
this.deployLogger = deployLogger;
}
/**
* Builds a storage group for a nonhosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildNonHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildNonHosted(deployState, owner, Optional.of(this)));
}
for (XmlNodeBuilder nodeBuilder : nodeBuilders) {
storageGroup.nodes.add(nodeBuilder.build(deployState, owner, storageGroup));
}
if (parent.isEmpty() && subGroups.isEmpty() && nodeBuilders.isEmpty()) {
storageGroup.nodes.add(buildSingleNode(deployState, owner));
}
return storageGroup;
}
/**
* Builds a storage group for a hosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
if (storageGroup.getIndex() != null)
throw new IllegalArgumentException("Specifying individual groups is not supported on hosted applications");
Map<HostResource, ClusterMembership> hostMapping =
nodeRequirement.isPresent() ?
provisionHosts(nodeRequirement.get(), owner.getStorageCluster().getClusterName(), owner.getRoot().hostSystem(), deployLogger) :
Collections.emptyMap();
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroups = collectAllocatedSubgroups(hostMapping);
if (hostGroups.size() > 1) {
if (parent.isPresent())
throw new IllegalArgumentException("Cannot specify groups using the groups attribute in nested content groups");
for (Map.Entry<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroup : hostGroups.entrySet()) {
String groupIndex = String.valueOf(hostGroup.getKey().get().index());
StorageGroup subgroup = new StorageGroup(true, groupIndex, groupIndex);
for (Map.Entry<HostResource, ClusterMembership> host : hostGroup.getValue().entrySet()) {
subgroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), subgroup, host.getValue()));
}
storageGroup.subgroups.add(subgroup);
}
}
else {
for (Map.Entry<HostResource, ClusterMembership> host : hostMapping.entrySet()) {
storageGroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), storageGroup, host.getValue()));
}
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildHosted(deployState, owner, Optional.of(this)));
}
}
return storageGroup;
}
/** Collect hosts per group */
private Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> collectAllocatedSubgroups(Map<HostResource, ClusterMembership> hostMapping) {
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostsPerGroup = new LinkedHashMap<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hostMapping.entrySet()) {
Optional<ClusterSpec.Group> group = entry.getValue().cluster().group();
Map<HostResource, ClusterMembership> hostsInGroup = hostsPerGroup.get(group);
if (hostsInGroup == null) {
hostsInGroup = new LinkedHashMap<>();
hostsPerGroup.put(group, hostsInGroup);
}
hostsInGroup.put(entry.getKey(), entry.getValue());
}
return hostsPerGroup;
}
} | class GroupBuilder {
private final StorageGroup storageGroup;
/* The explicitly defined subgroups of this */
private final List<GroupBuilder> subGroups;
private final List<XmlNodeBuilder> nodeBuilders;
/** The nodes explicitly specified as a nodes tag in this group, or empty if none */
private final Optional<NodesSpecification> nodeRequirement;
private final DeployLogger deployLogger;
private GroupBuilder(StorageGroup storageGroup, List<GroupBuilder> subGroups, List<XmlNodeBuilder> nodeBuilders,
Optional<NodesSpecification> nodeRequirement, DeployLogger deployLogger) {
this.storageGroup = storageGroup;
this.subGroups = subGroups;
this.nodeBuilders = nodeBuilders;
this.nodeRequirement = nodeRequirement;
this.deployLogger = deployLogger;
}
/**
* Builds a storage group for a nonhosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildNonHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildNonHosted(deployState, owner, Optional.of(this)));
}
for (XmlNodeBuilder nodeBuilder : nodeBuilders) {
storageGroup.nodes.add(nodeBuilder.build(deployState, owner, storageGroup));
}
if (parent.isEmpty() && subGroups.isEmpty() && nodeBuilders.isEmpty()) {
storageGroup.nodes.add(buildSingleNode(deployState, owner));
}
return storageGroup;
}
/**
* Builds a storage group for a hosted environment.
*
* @param deployState the deploy state of this deployment
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group built by this
*/
public StorageGroup buildHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
if (storageGroup.getIndex() != null)
throw new IllegalArgumentException("Specifying individual groups is not supported on hosted applications");
// Provision hosts for the explicit node requirement, if any
Map<HostResource, ClusterMembership> hostMapping =
nodeRequirement.isPresent() ?
provisionHosts(nodeRequirement.get(), owner.getStorageCluster().getClusterName(), owner.getRoot().hostSystem(), deployLogger) :
Collections.emptyMap();
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroups = collectAllocatedSubgroups(hostMapping);
if (hostGroups.size() > 1) {
// The allocation spans several groups: create one subgroup per allocated group
if (parent.isPresent())
throw new IllegalArgumentException("Cannot specify groups using the groups attribute in nested content groups");
for (Map.Entry<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroup : hostGroups.entrySet()) {
// NOTE(review): assumes every group key is a present Optional when multiple groups exist — confirm with the provisioner
String groupIndex = String.valueOf(hostGroup.getKey().get().index());
StorageGroup subgroup = new StorageGroup(true, groupIndex, groupIndex);
for (Map.Entry<HostResource, ClusterMembership> host : hostGroup.getValue().entrySet()) {
subgroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), subgroup, host.getValue()));
}
storageGroup.subgroups.add(subgroup);
}
}
else {
// Single (or no) allocated group: add nodes directly, then recurse into explicitly defined subgroups
for (Map.Entry<HostResource, ClusterMembership> host : hostMapping.entrySet()) {
storageGroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), storageGroup, host.getValue()));
}
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildHosted(deployState, owner, Optional.of(this)));
}
}
return storageGroup;
}
/**
 * Groups the given host-to-membership mapping by the hosts' allocated cluster group.
 *
 * @param hostMapping the provisioned hosts and their cluster memberships
 * @return hosts keyed by their (possibly absent) allocated group, preserving input order
 */
private Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> collectAllocatedSubgroups(Map<HostResource, ClusterMembership> hostMapping) {
    // LinkedHashMap at both levels preserves the iteration order of hostMapping,
    // keeping group and node ordering stable for callers.
    Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostsPerGroup = new LinkedHashMap<>();
    for (Map.Entry<HostResource, ClusterMembership> entry : hostMapping.entrySet()) {
        Optional<ClusterSpec.Group> group = entry.getValue().cluster().group();
        // computeIfAbsent replaces the manual get/null-check/put dance
        hostsPerGroup.computeIfAbsent(group, ignored -> new LinkedHashMap<>())
                     .put(entry.getKey(), entry.getValue());
    }
    return hostsPerGroup;
}
} |
handle null event.at ? | private static NodeSpec createNodeSpec(NodeRepositoryNode node) {
// Translates a wire-format node into the domain NodeSpec. Type and state are mandatory;
// most other wire fields are nullable and are wrapped in Optionals here.
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
NodeState nodeState = NodeState.valueOf(node.state);
Optional<NodeMembership> membership = Optional.ofNullable(node.membership)
.map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired));
// A missing reports object is treated as "no reports"
NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of));
return new NodeSpec(
node.hostname,
Optional.ofNullable(node.openStackId),
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString),
nodeState,
nodeType,
node.flavor,
Optional.ofNullable(node.wantedVespaVersion).map(Version::fromString),
Optional.ofNullable(node.vespaVersion).map(Version::fromString),
Optional.ofNullable(node.wantedOsVersion).map(Version::fromString),
Optional.ofNullable(node.currentOsVersion).map(Version::fromString),
// Absent orchestrator status defaults to NO_REMARKS
Optional.ofNullable(node.orchestratorStatus).map(OrchestratorStatus::fromString).orElse(OrchestratorStatus.NO_REMARKS),
Optional.ofNullable(node.owner).map(o -> ApplicationId.from(o.tenant, o.application, o.instance)),
membership,
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
Optional.ofNullable(node.wantedFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.currentFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.modelName),
nodeResources(node.resources),
nodeResources(node.realResources),
node.ipAddresses,
node.additionalIpAddresses,
reports,
// event.at is a primitive long and can never be null; a timestamp missing on the wire
// would deserialize as 0 (epoch) — presumably the server always sets it; confirm if relied upon
node.history.stream().map(event -> new Event(event.agent, event.event, Instant.ofEpochMilli(event.at))).collect(Collectors.toUnmodifiableList()),
Optional.ofNullable(node.parentHostname),
Optional.ofNullable(node.archiveUri).map(URI::create),
Optional.ofNullable(node.exclusiveTo).map(ApplicationId::fromSerializedForm));
} | node.history.stream().map(event -> new Event(event.agent, event.event, Instant.ofEpochMilli(event.at))).collect(Collectors.toUnmodifiableList()), | private static NodeSpec createNodeSpec(NodeRepositoryNode node) {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
NodeState nodeState = NodeState.valueOf(node.state);
Optional<NodeMembership> membership = Optional.ofNullable(node.membership)
.map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired));
NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of));
return new NodeSpec(
node.hostname,
Optional.ofNullable(node.openStackId),
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString),
nodeState,
nodeType,
node.flavor,
Optional.ofNullable(node.wantedVespaVersion).map(Version::fromString),
Optional.ofNullable(node.vespaVersion).map(Version::fromString),
Optional.ofNullable(node.wantedOsVersion).map(Version::fromString),
Optional.ofNullable(node.currentOsVersion).map(Version::fromString),
Optional.ofNullable(node.orchestratorStatus).map(OrchestratorStatus::fromString).orElse(OrchestratorStatus.NO_REMARKS),
Optional.ofNullable(node.owner).map(o -> ApplicationId.from(o.tenant, o.application, o.instance)),
membership,
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
Optional.ofNullable(node.wantedFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.currentFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.modelName),
nodeResources(node.resources),
nodeResources(node.realResources),
node.ipAddresses,
node.additionalIpAddresses,
reports,
node.history.stream().map(event -> new Event(event.agent, event.event, Instant.ofEpochMilli(event.at))).collect(Collectors.toUnmodifiableList()),
Optional.ofNullable(node.parentHostname),
Optional.ofNullable(node.archiveUri).map(URI::create),
Optional.ofNullable(node.exclusiveTo).map(ApplicationId::fromSerializedForm));
} | class RealNodeRepository implements NodeRepository {
private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName());
// REST client towards the config server; all node repository operations go through it
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
    // Convert every node to its wire representation and post them in a single request.
    NodeMessageResponse response = configServerApi.post(
            "/nodes/v2/node",
            nodes.stream().map(RealNodeRepository::nodeRepositoryNodeFromAddNode).collect(Collectors.toList()),
            NodeMessageResponse.class);
    if ( ! Strings.isNullOrEmpty(response.errorCode))
        throw new NodeRepositoryException("Failed to add nodes: " + response.message + " " + response.errorCode);
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
    // Recursively list every node whose parent host is the given base host.
    GetNodesResponse response = configServerApi.get(
            "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName, GetNodesResponse.class);
    return response.nodes.stream()
                         .map(RealNodeRepository::createNodeSpec)
                         .collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getOptionalNode(String hostName) {
    try {
        return Optional.ofNullable(configServerApi.get("/nodes/v2/node/" + hostName, NodeRepositoryNode.class))
                       .map(RealNodeRepository::createNodeSpec);
    } catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
        // Both "not found" and "forbidden" are reported to the caller as an absent node.
        return Optional.empty();
    }
}
/**
* Get all ACLs that belong to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
// Group each ACL category (ports, nodes, networks) by the hostname that trusts it
Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toSet())));
Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(
node -> new Acl.Node(node.hostname, node.ipAddress),
Collectors.toSet())));
Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream()
.collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy,
Collectors.mapping(node -> node.network, Collectors.toSet())));
// Build one Acl per hostname seen in any category.
// NOTE(review): a hostname absent from a category passes null to the Acl constructor —
// presumably Acl treats null as "none"; confirm against Acl's constructor contract.
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname),
trustedNetworks.get(hostname))));
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
    // Apply a partial update of the node's attributes via PATCH.
    NodeRepositoryNode patchBody = nodeRepositoryNodeFromNodeAttributes(nodeAttributes);
    NodeMessageResponse response =
            configServerApi.patch("/nodes/v2/node/" + hostName, patchBody, NodeMessageResponse.class);
    if ( ! Strings.isNullOrEmpty(response.errorCode))
        throw new NodeRepositoryException("Failed to update node attributes: " + response.message + " " + response.errorCode);
}
@Override
public void setNodeState(String hostName, NodeState nodeState) {
    // Move the node to the wanted state; the response message is logged for traceability.
    NodeMessageResponse response = configServerApi.put(
            "/nodes/v2/state/" + nodeState.name() + "/" + hostName,
            Optional.empty(), /* body */
            NodeMessageResponse.class);
    logger.info(response.message);
    if ( ! Strings.isNullOrEmpty(response.errorCode))
        throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode);
}
// Translates wire-format resources to the domain NodeResources; disk speed and
// storage type fall back to their defaults when absent on the wire (see the FromString helpers).
private static NodeResources nodeResources(NodeRepositoryNode.NodeResources nodeResources) {
return new NodeResources(
nodeResources.vcpu,
nodeResources.memoryGb,
nodeResources.diskGb,
nodeResources.bandwidthGbps,
diskSpeedFromString(nodeResources.diskSpeed),
storageTypeFromString(nodeResources.storageType));
}
// Parses a wire-format disk speed; a missing value means "use the default".
private static NodeResources.DiskSpeed diskSpeedFromString(String value) {
    if (value == null) return NodeResources.DiskSpeed.getDefault();
    if (value.equals("fast")) return NodeResources.DiskSpeed.fast;
    if (value.equals("slow")) return NodeResources.DiskSpeed.slow;
    if (value.equals("any")) return NodeResources.DiskSpeed.any;
    throw new IllegalArgumentException("Unknown disk speed '" + value + "'");
}
// Parses a wire-format storage type; a missing value means "use the default".
private static NodeResources.StorageType storageTypeFromString(String value) {
    if (value == null) return NodeResources.StorageType.getDefault();
    if (value.equals("remote")) return NodeResources.StorageType.remote;
    if (value.equals("local")) return NodeResources.StorageType.local;
    if (value.equals("any")) return NodeResources.StorageType.any;
    throw new IllegalArgumentException("Unknown storage type '" + value + "'");
}
// Serializes a disk speed to its wire-format string.
private static String toString(NodeResources.DiskSpeed diskSpeed) {
    if (diskSpeed == NodeResources.DiskSpeed.fast) return "fast";
    if (diskSpeed == NodeResources.DiskSpeed.slow) return "slow";
    if (diskSpeed == NodeResources.DiskSpeed.any) return "any";
    throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
// Serializes a storage type to its wire-format string.
private static String toString(NodeResources.StorageType storageType) {
    if (storageType == NodeResources.StorageType.remote) return "remote";
    if (storageType == NodeResources.StorageType.local) return "local";
    if (storageType == NodeResources.StorageType.any) return "any";
    throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
// Translates an AddNode request to the node repository wire format.
private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
// Fall back to a synthetic id when none is provided
node.openStackId = addNode.id.orElse("fake-" + addNode.hostname);
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
node.nodeFlavor.ifPresent(f -> node.flavor = f);
addNode.flavorOverrides.flatMap(FlavorOverrides::diskGb).ifPresent(d -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.diskGb = d;
});
// Explicit node resources replace any flavor disk-size override set just above
addNode.nodeResources.ifPresent(resources -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.vcpu = resources.vcpu();
node.resources.memoryGb = resources.memoryGb();
node.resources.diskGb = resources.diskGb();
node.resources.bandwidthGbps = resources.bandwidthGbps();
node.resources.diskSpeed = toString(resources.diskSpeed());
node.resources.storageType = toString(resources.storageType());
});
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
}
// Translates changed node attributes into a sparse wire-format node for PATCHing.
// NOTE(review): fields left null appear to mean "unchanged" — confirm the server's PATCH semantics.
public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = nodeAttributes.getHostId().orElse(null);
node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null);
node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null);
node.vespaVersion = nodeAttributes.getVespaVersion().map(Version::toFullString).orElse(null);
node.currentOsVersion = nodeAttributes.getCurrentOsVersion().map(Version::toFullString).orElse(null);
node.currentFirmwareCheck = nodeAttributes.getCurrentFirmwareCheck().map(Instant::toEpochMilli).orElse(null);
// Empty report maps are sent as null; otherwise sort keys (TreeMap) for stable serialization
Map<String, JsonNode> reports = nodeAttributes.getReports();
node.reports = reports == null || reports.isEmpty() ? null : new TreeMap<>(reports);
return node;
}
} | class RealNodeRepository implements NodeRepository {
private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName());
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to add nodes: " + response.message + " " + response.errorCode);
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getOptionalNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
return Optional.ofNullable(nodeResponse).map(RealNodeRepository::createNodeSpec);
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toSet())));
Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(
node -> new Acl.Node(node.hostname, node.ipAddress),
Collectors.toSet())));
Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream()
.collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy,
Collectors.mapping(node -> node.network, Collectors.toSet())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname),
trustedNetworks.get(hostname))));
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to update node attributes: " + response.message + " " + response.errorCode);
}
@Override
public void setNodeState(String hostName, NodeState nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
logger.info(response.message);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode);
}
private static NodeResources nodeResources(NodeRepositoryNode.NodeResources nodeResources) {
return new NodeResources(
nodeResources.vcpu,
nodeResources.memoryGb,
nodeResources.diskGb,
nodeResources.bandwidthGbps,
diskSpeedFromString(nodeResources.diskSpeed),
storageTypeFromString(nodeResources.storageType));
}
private static NodeResources.DiskSpeed diskSpeedFromString(String diskSpeed) {
if (diskSpeed == null) return NodeResources.DiskSpeed.getDefault();
switch (diskSpeed) {
case "fast": return NodeResources.DiskSpeed.fast;
case "slow": return NodeResources.DiskSpeed.slow;
case "any": return NodeResources.DiskSpeed.any;
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed + "'");
}
}
private static NodeResources.StorageType storageTypeFromString(String storageType) {
if (storageType == null) return NodeResources.StorageType.getDefault();
switch (storageType) {
case "remote": return NodeResources.StorageType.remote;
case "local": return NodeResources.StorageType.local;
case "any": return NodeResources.StorageType.any;
default: throw new IllegalArgumentException("Unknown storage type '" + storageType + "'");
}
}
private static String toString(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
}
private static String toString(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
}
private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = addNode.id.orElse("fake-" + addNode.hostname);
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
addNode.nodeFlavor.ifPresent(f -> node.flavor = f);
addNode.flavorOverrides.flatMap(FlavorOverrides::diskGb).ifPresent(d -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.diskGb = d;
});
addNode.nodeResources.ifPresent(resources -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.vcpu = resources.vcpu();
node.resources.memoryGb = resources.memoryGb();
node.resources.diskGb = resources.diskGb();
node.resources.bandwidthGbps = resources.bandwidthGbps();
node.resources.diskSpeed = toString(resources.diskSpeed());
node.resources.storageType = toString(resources.storageType());
});
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
}
public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = nodeAttributes.getHostId().orElse(null);
node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null);
node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null);
node.vespaVersion = nodeAttributes.getVespaVersion().map(Version::toFullString).orElse(null);
node.currentOsVersion = nodeAttributes.getCurrentOsVersion().map(Version::toFullString).orElse(null);
node.currentFirmwareCheck = nodeAttributes.getCurrentFirmwareCheck().map(Instant::toEpochMilli).orElse(null);
Map<String, JsonNode> reports = nodeAttributes.getReports();
node.reports = reports == null || reports.isEmpty() ? null : new TreeMap<>(reports);
return node;
}
} |
Can't be null, it is `long` | private static NodeSpec createNodeSpec(NodeRepositoryNode node) {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
NodeState nodeState = NodeState.valueOf(node.state);
Optional<NodeMembership> membership = Optional.ofNullable(node.membership)
.map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired));
NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of));
return new NodeSpec(
node.hostname,
Optional.ofNullable(node.openStackId),
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString),
nodeState,
nodeType,
node.flavor,
Optional.ofNullable(node.wantedVespaVersion).map(Version::fromString),
Optional.ofNullable(node.vespaVersion).map(Version::fromString),
Optional.ofNullable(node.wantedOsVersion).map(Version::fromString),
Optional.ofNullable(node.currentOsVersion).map(Version::fromString),
Optional.ofNullable(node.orchestratorStatus).map(OrchestratorStatus::fromString).orElse(OrchestratorStatus.NO_REMARKS),
Optional.ofNullable(node.owner).map(o -> ApplicationId.from(o.tenant, o.application, o.instance)),
membership,
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
Optional.ofNullable(node.wantedFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.currentFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.modelName),
nodeResources(node.resources),
nodeResources(node.realResources),
node.ipAddresses,
node.additionalIpAddresses,
reports,
node.history.stream().map(event -> new Event(event.agent, event.event, Instant.ofEpochMilli(event.at))).collect(Collectors.toUnmodifiableList()),
Optional.ofNullable(node.parentHostname),
Optional.ofNullable(node.archiveUri).map(URI::create),
Optional.ofNullable(node.exclusiveTo).map(ApplicationId::fromSerializedForm));
} | node.history.stream().map(event -> new Event(event.agent, event.event, Instant.ofEpochMilli(event.at))).collect(Collectors.toUnmodifiableList()), | private static NodeSpec createNodeSpec(NodeRepositoryNode node) {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
NodeState nodeState = NodeState.valueOf(node.state);
Optional<NodeMembership> membership = Optional.ofNullable(node.membership)
.map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired));
NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of));
return new NodeSpec(
node.hostname,
Optional.ofNullable(node.openStackId),
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString),
nodeState,
nodeType,
node.flavor,
Optional.ofNullable(node.wantedVespaVersion).map(Version::fromString),
Optional.ofNullable(node.vespaVersion).map(Version::fromString),
Optional.ofNullable(node.wantedOsVersion).map(Version::fromString),
Optional.ofNullable(node.currentOsVersion).map(Version::fromString),
Optional.ofNullable(node.orchestratorStatus).map(OrchestratorStatus::fromString).orElse(OrchestratorStatus.NO_REMARKS),
Optional.ofNullable(node.owner).map(o -> ApplicationId.from(o.tenant, o.application, o.instance)),
membership,
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
Optional.ofNullable(node.wantedFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.currentFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.modelName),
nodeResources(node.resources),
nodeResources(node.realResources),
node.ipAddresses,
node.additionalIpAddresses,
reports,
node.history.stream().map(event -> new Event(event.agent, event.event, Instant.ofEpochMilli(event.at))).collect(Collectors.toUnmodifiableList()),
Optional.ofNullable(node.parentHostname),
Optional.ofNullable(node.archiveUri).map(URI::create),
Optional.ofNullable(node.exclusiveTo).map(ApplicationId::fromSerializedForm));
} | class RealNodeRepository implements NodeRepository {
private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName());
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to add nodes: " + response.message + " " + response.errorCode);
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getOptionalNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
return Optional.ofNullable(nodeResponse).map(RealNodeRepository::createNodeSpec);
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toSet())));
Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(
node -> new Acl.Node(node.hostname, node.ipAddress),
Collectors.toSet())));
Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream()
.collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy,
Collectors.mapping(node -> node.network, Collectors.toSet())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname),
trustedNetworks.get(hostname))));
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to update node attributes: " + response.message + " " + response.errorCode);
}
@Override
public void setNodeState(String hostName, NodeState nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
logger.info(response.message);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode);
}
private static NodeResources nodeResources(NodeRepositoryNode.NodeResources nodeResources) {
return new NodeResources(
nodeResources.vcpu,
nodeResources.memoryGb,
nodeResources.diskGb,
nodeResources.bandwidthGbps,
diskSpeedFromString(nodeResources.diskSpeed),
storageTypeFromString(nodeResources.storageType));
}
private static NodeResources.DiskSpeed diskSpeedFromString(String diskSpeed) {
if (diskSpeed == null) return NodeResources.DiskSpeed.getDefault();
switch (diskSpeed) {
case "fast": return NodeResources.DiskSpeed.fast;
case "slow": return NodeResources.DiskSpeed.slow;
case "any": return NodeResources.DiskSpeed.any;
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed + "'");
}
}
private static NodeResources.StorageType storageTypeFromString(String storageType) {
if (storageType == null) return NodeResources.StorageType.getDefault();
switch (storageType) {
case "remote": return NodeResources.StorageType.remote;
case "local": return NodeResources.StorageType.local;
case "any": return NodeResources.StorageType.any;
default: throw new IllegalArgumentException("Unknown storage type '" + storageType + "'");
}
}
private static String toString(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
}
private static String toString(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
}
private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = addNode.id.orElse("fake-" + addNode.hostname);
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
addNode.nodeFlavor.ifPresent(f -> node.flavor = f);
addNode.flavorOverrides.flatMap(FlavorOverrides::diskGb).ifPresent(d -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.diskGb = d;
});
addNode.nodeResources.ifPresent(resources -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.vcpu = resources.vcpu();
node.resources.memoryGb = resources.memoryGb();
node.resources.diskGb = resources.diskGb();
node.resources.bandwidthGbps = resources.bandwidthGbps();
node.resources.diskSpeed = toString(resources.diskSpeed());
node.resources.storageType = toString(resources.storageType());
});
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
}
public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = nodeAttributes.getHostId().orElse(null);
node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null);
node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null);
node.vespaVersion = nodeAttributes.getVespaVersion().map(Version::toFullString).orElse(null);
node.currentOsVersion = nodeAttributes.getCurrentOsVersion().map(Version::toFullString).orElse(null);
node.currentFirmwareCheck = nodeAttributes.getCurrentFirmwareCheck().map(Instant::toEpochMilli).orElse(null);
Map<String, JsonNode> reports = nodeAttributes.getReports();
node.reports = reports == null || reports.isEmpty() ? null : new TreeMap<>(reports);
return node;
}
} | class RealNodeRepository implements NodeRepository {
private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName());
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to add nodes: " + response.message + " " + response.errorCode);
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getOptionalNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
return Optional.ofNullable(nodeResponse).map(RealNodeRepository::createNodeSpec);
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toSet())));
Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(
node -> new Acl.Node(node.hostname, node.ipAddress),
Collectors.toSet())));
Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream()
.collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy,
Collectors.mapping(node -> node.network, Collectors.toSet())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname),
trustedNetworks.get(hostname))));
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to update node attributes: " + response.message + " " + response.errorCode);
}
@Override
public void setNodeState(String hostName, NodeState nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
logger.info(response.message);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode);
}
private static NodeResources nodeResources(NodeRepositoryNode.NodeResources nodeResources) {
return new NodeResources(
nodeResources.vcpu,
nodeResources.memoryGb,
nodeResources.diskGb,
nodeResources.bandwidthGbps,
diskSpeedFromString(nodeResources.diskSpeed),
storageTypeFromString(nodeResources.storageType));
}
private static NodeResources.DiskSpeed diskSpeedFromString(String diskSpeed) {
if (diskSpeed == null) return NodeResources.DiskSpeed.getDefault();
switch (diskSpeed) {
case "fast": return NodeResources.DiskSpeed.fast;
case "slow": return NodeResources.DiskSpeed.slow;
case "any": return NodeResources.DiskSpeed.any;
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed + "'");
}
}
private static NodeResources.StorageType storageTypeFromString(String storageType) {
if (storageType == null) return NodeResources.StorageType.getDefault();
switch (storageType) {
case "remote": return NodeResources.StorageType.remote;
case "local": return NodeResources.StorageType.local;
case "any": return NodeResources.StorageType.any;
default: throw new IllegalArgumentException("Unknown storage type '" + storageType + "'");
}
}
private static String toString(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
}
private static String toString(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
}
private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = addNode.id.orElse("fake-" + addNode.hostname);
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
addNode.nodeFlavor.ifPresent(f -> node.flavor = f);
addNode.flavorOverrides.flatMap(FlavorOverrides::diskGb).ifPresent(d -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.diskGb = d;
});
addNode.nodeResources.ifPresent(resources -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.vcpu = resources.vcpu();
node.resources.memoryGb = resources.memoryGb();
node.resources.diskGb = resources.diskGb();
node.resources.bandwidthGbps = resources.bandwidthGbps();
node.resources.diskSpeed = toString(resources.diskSpeed());
node.resources.storageType = toString(resources.storageType());
});
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
}
public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = nodeAttributes.getHostId().orElse(null);
node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null);
node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null);
node.vespaVersion = nodeAttributes.getVespaVersion().map(Version::toFullString).orElse(null);
node.currentOsVersion = nodeAttributes.getCurrentOsVersion().map(Version::toFullString).orElse(null);
node.currentFirmwareCheck = nodeAttributes.getCurrentFirmwareCheck().map(Instant::toEpochMilli).orElse(null);
Map<String, JsonNode> reports = nodeAttributes.getReports();
node.reports = reports == null || reports.isEmpty() ? null : new TreeMap<>(reports);
return node;
}
} |
OK, handle that `at` is absent then? | private static NodeSpec createNodeSpec(NodeRepositoryNode node) {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
NodeState nodeState = NodeState.valueOf(node.state);
Optional<NodeMembership> membership = Optional.ofNullable(node.membership)
.map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired));
NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of));
return new NodeSpec(
node.hostname,
Optional.ofNullable(node.openStackId),
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString),
nodeState,
nodeType,
node.flavor,
Optional.ofNullable(node.wantedVespaVersion).map(Version::fromString),
Optional.ofNullable(node.vespaVersion).map(Version::fromString),
Optional.ofNullable(node.wantedOsVersion).map(Version::fromString),
Optional.ofNullable(node.currentOsVersion).map(Version::fromString),
Optional.ofNullable(node.orchestratorStatus).map(OrchestratorStatus::fromString).orElse(OrchestratorStatus.NO_REMARKS),
Optional.ofNullable(node.owner).map(o -> ApplicationId.from(o.tenant, o.application, o.instance)),
membership,
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
Optional.ofNullable(node.wantedFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.currentFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.modelName),
nodeResources(node.resources),
nodeResources(node.realResources),
node.ipAddresses,
node.additionalIpAddresses,
reports,
node.history.stream().map(event -> new Event(event.agent, event.event, Instant.ofEpochMilli(event.at))).collect(Collectors.toUnmodifiableList()),
Optional.ofNullable(node.parentHostname),
Optional.ofNullable(node.archiveUri).map(URI::create),
Optional.ofNullable(node.exclusiveTo).map(ApplicationId::fromSerializedForm));
} | node.history.stream().map(event -> new Event(event.agent, event.event, Instant.ofEpochMilli(event.at))).collect(Collectors.toUnmodifiableList()), | private static NodeSpec createNodeSpec(NodeRepositoryNode node) {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
NodeState nodeState = NodeState.valueOf(node.state);
Optional<NodeMembership> membership = Optional.ofNullable(node.membership)
.map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired));
NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of));
return new NodeSpec(
node.hostname,
Optional.ofNullable(node.openStackId),
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString),
nodeState,
nodeType,
node.flavor,
Optional.ofNullable(node.wantedVespaVersion).map(Version::fromString),
Optional.ofNullable(node.vespaVersion).map(Version::fromString),
Optional.ofNullable(node.wantedOsVersion).map(Version::fromString),
Optional.ofNullable(node.currentOsVersion).map(Version::fromString),
Optional.ofNullable(node.orchestratorStatus).map(OrchestratorStatus::fromString).orElse(OrchestratorStatus.NO_REMARKS),
Optional.ofNullable(node.owner).map(o -> ApplicationId.from(o.tenant, o.application, o.instance)),
membership,
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
Optional.ofNullable(node.wantedFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.currentFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.modelName),
nodeResources(node.resources),
nodeResources(node.realResources),
node.ipAddresses,
node.additionalIpAddresses,
reports,
node.history.stream().map(event -> new Event(event.agent, event.event, Instant.ofEpochMilli(event.at))).collect(Collectors.toUnmodifiableList()),
Optional.ofNullable(node.parentHostname),
Optional.ofNullable(node.archiveUri).map(URI::create),
Optional.ofNullable(node.exclusiveTo).map(ApplicationId::fromSerializedForm));
} | class RealNodeRepository implements NodeRepository {
private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName());
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to add nodes: " + response.message + " " + response.errorCode);
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getOptionalNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
return Optional.ofNullable(nodeResponse).map(RealNodeRepository::createNodeSpec);
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toSet())));
Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(
node -> new Acl.Node(node.hostname, node.ipAddress),
Collectors.toSet())));
Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream()
.collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy,
Collectors.mapping(node -> node.network, Collectors.toSet())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname),
trustedNetworks.get(hostname))));
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to update node attributes: " + response.message + " " + response.errorCode);
}
@Override
public void setNodeState(String hostName, NodeState nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
logger.info(response.message);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode);
}
private static NodeResources nodeResources(NodeRepositoryNode.NodeResources nodeResources) {
return new NodeResources(
nodeResources.vcpu,
nodeResources.memoryGb,
nodeResources.diskGb,
nodeResources.bandwidthGbps,
diskSpeedFromString(nodeResources.diskSpeed),
storageTypeFromString(nodeResources.storageType));
}
private static NodeResources.DiskSpeed diskSpeedFromString(String diskSpeed) {
if (diskSpeed == null) return NodeResources.DiskSpeed.getDefault();
switch (diskSpeed) {
case "fast": return NodeResources.DiskSpeed.fast;
case "slow": return NodeResources.DiskSpeed.slow;
case "any": return NodeResources.DiskSpeed.any;
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed + "'");
}
}
private static NodeResources.StorageType storageTypeFromString(String storageType) {
if (storageType == null) return NodeResources.StorageType.getDefault();
switch (storageType) {
case "remote": return NodeResources.StorageType.remote;
case "local": return NodeResources.StorageType.local;
case "any": return NodeResources.StorageType.any;
default: throw new IllegalArgumentException("Unknown storage type '" + storageType + "'");
}
}
private static String toString(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
}
private static String toString(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
}
private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = addNode.id.orElse("fake-" + addNode.hostname);
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
addNode.nodeFlavor.ifPresent(f -> node.flavor = f);
addNode.flavorOverrides.flatMap(FlavorOverrides::diskGb).ifPresent(d -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.diskGb = d;
});
addNode.nodeResources.ifPresent(resources -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.vcpu = resources.vcpu();
node.resources.memoryGb = resources.memoryGb();
node.resources.diskGb = resources.diskGb();
node.resources.bandwidthGbps = resources.bandwidthGbps();
node.resources.diskSpeed = toString(resources.diskSpeed());
node.resources.storageType = toString(resources.storageType());
});
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
}
public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = nodeAttributes.getHostId().orElse(null);
node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null);
node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null);
node.vespaVersion = nodeAttributes.getVespaVersion().map(Version::toFullString).orElse(null);
node.currentOsVersion = nodeAttributes.getCurrentOsVersion().map(Version::toFullString).orElse(null);
node.currentFirmwareCheck = nodeAttributes.getCurrentFirmwareCheck().map(Instant::toEpochMilli).orElse(null);
Map<String, JsonNode> reports = nodeAttributes.getReports();
node.reports = reports == null || reports.isEmpty() ? null : new TreeMap<>(reports);
return node;
}
} | class RealNodeRepository implements NodeRepository {
private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName());
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to add nodes: " + response.message + " " + response.errorCode);
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getOptionalNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
return Optional.ofNullable(nodeResponse).map(RealNodeRepository::createNodeSpec);
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toSet())));
Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(
node -> new Acl.Node(node.hostname, node.ipAddress),
Collectors.toSet())));
Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream()
.collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy,
Collectors.mapping(node -> node.network, Collectors.toSet())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname),
trustedNetworks.get(hostname))));
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to update node attributes: " + response.message + " " + response.errorCode);
}
@Override
public void setNodeState(String hostName, NodeState nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
logger.info(response.message);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode);
}
private static NodeResources nodeResources(NodeRepositoryNode.NodeResources nodeResources) {
return new NodeResources(
nodeResources.vcpu,
nodeResources.memoryGb,
nodeResources.diskGb,
nodeResources.bandwidthGbps,
diskSpeedFromString(nodeResources.diskSpeed),
storageTypeFromString(nodeResources.storageType));
}
private static NodeResources.DiskSpeed diskSpeedFromString(String diskSpeed) {
if (diskSpeed == null) return NodeResources.DiskSpeed.getDefault();
switch (diskSpeed) {
case "fast": return NodeResources.DiskSpeed.fast;
case "slow": return NodeResources.DiskSpeed.slow;
case "any": return NodeResources.DiskSpeed.any;
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed + "'");
}
}
private static NodeResources.StorageType storageTypeFromString(String storageType) {
if (storageType == null) return NodeResources.StorageType.getDefault();
switch (storageType) {
case "remote": return NodeResources.StorageType.remote;
case "local": return NodeResources.StorageType.local;
case "any": return NodeResources.StorageType.any;
default: throw new IllegalArgumentException("Unknown storage type '" + storageType + "'");
}
}
private static String toString(NodeResources.DiskSpeed diskSpeed) {
switch (diskSpeed) {
case fast : return "fast";
case slow : return "slow";
case any : return "any";
default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
}
}
private static String toString(NodeResources.StorageType storageType) {
switch (storageType) {
case remote : return "remote";
case local : return "local";
case any : return "any";
default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
}
}
private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = addNode.id.orElse("fake-" + addNode.hostname);
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
addNode.nodeFlavor.ifPresent(f -> node.flavor = f);
addNode.flavorOverrides.flatMap(FlavorOverrides::diskGb).ifPresent(d -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.diskGb = d;
});
addNode.nodeResources.ifPresent(resources -> {
node.resources = new NodeRepositoryNode.NodeResources();
node.resources.vcpu = resources.vcpu();
node.resources.memoryGb = resources.memoryGb();
node.resources.diskGb = resources.diskGb();
node.resources.bandwidthGbps = resources.bandwidthGbps();
node.resources.diskSpeed = toString(resources.diskSpeed());
node.resources.storageType = toString(resources.storageType());
});
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
}
public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = nodeAttributes.getHostId().orElse(null);
node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null);
node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null);
node.vespaVersion = nodeAttributes.getVespaVersion().map(Version::toFullString).orElse(null);
node.currentOsVersion = nodeAttributes.getCurrentOsVersion().map(Version::toFullString).orElse(null);
node.currentFirmwareCheck = nodeAttributes.getCurrentFirmwareCheck().map(Instant::toEpochMilli).orElse(null);
Map<String, JsonNode> reports = nodeAttributes.getReports();
node.reports = reports == null || reports.isEmpty() ? null : new TreeMap<>(reports);
return node;
}
} |
emm, I think don't need modify Coordinator, `GATHER` plan is right for normal node mode | private static boolean enableComputeNode(ExecPlan execPlan) {
if (execPlan.getConnectContext().getSessionVariable().getUseComputeNodes() > 0) {
boolean preferComputeNode = execPlan.getConnectContext().getSessionVariable().isPreferComputeNode();
if (preferComputeNode || RunMode.getCurrentRunMode() == RunMode.SHARED_DATA) {
return true;
}
}
return false;
} | if (execPlan.getConnectContext().getSessionVariable().getUseComputeNodes() > 0) { | private static boolean enableComputeNode(ExecPlan execPlan) {
boolean preferComputeNode = execPlan.getConnectContext().getSessionVariable().isPreferComputeNode();
if (preferComputeNode || RunMode.getCurrentRunMode() == RunMode.SHARED_DATA) {
return true;
}
return false;
} | class PlanFragmentBuilder {
private static final Logger LOG = LogManager.getLogger(PlanFragmentBuilder.class);
public static ExecPlan createPhysicalPlan(OptExpression plan, ConnectContext connectContext,
List<ColumnRefOperator> outputColumns, ColumnRefFactory columnRefFactory,
List<String> colNames,
TResultSinkType resultSinkType,
boolean hasOutputFragment) {
ExecPlan execPlan = new ExecPlan(connectContext, colNames, plan, outputColumns);
createOutputFragment(new PhysicalPlanTranslator(columnRefFactory).translate(plan, execPlan), execPlan,
outputColumns, hasOutputFragment);
execPlan.setPlanCount(plan.getPlanCount());
return finalizeFragments(execPlan, resultSinkType);
}
public static ExecPlan createPhysicalPlanForMV(ConnectContext connectContext,
CreateMaterializedViewStatement createStmt,
OptExpression optExpr,
LogicalPlan logicalPlan,
QueryRelation queryRelation,
ColumnRefFactory columnRefFactory) throws DdlException {
List<String> colNames = queryRelation.getColumnOutputNames();
List<ColumnRefOperator> outputColumns = logicalPlan.getOutputColumn();
ExecPlan execPlan = new ExecPlan(connectContext, colNames, optExpr, outputColumns);
PlanFragment planFragment = new PhysicalPlanTranslator(columnRefFactory).translate(optExpr, execPlan);
execPlan.setPlanCount(optExpr.getPlanCount());
createStmt.setMaintenancePlan(execPlan, columnRefFactory);
for (PlanFragment fragment : execPlan.getFragments()) {
fragment.createDataSink(TResultSinkType.MYSQL_PROTOCAL);
}
Collections.reverse(execPlan.getFragments());
PartitionInfo partitionInfo = LocalMetastore.buildPartitionInfo(createStmt);
long mvId = GlobalStateMgr.getCurrentState().getNextId();
long dbId = GlobalStateMgr.getCurrentState().getDb(createStmt.getTableName().getDb()).getId();
MaterializedView view =
MaterializedViewMgr.getInstance().createSinkTable(createStmt, partitionInfo, mvId, dbId);
TupleDescriptor tupleDesc = buildTupleDesc(execPlan, view);
view.setMaintenancePlan(execPlan);
List<Long> fakePartitionIds = Arrays.asList(1L, 2L, 3L);
DataSink tableSink = new OlapTableSink(view, tupleDesc, fakePartitionIds, true,
view.writeQuorum(), view.enableReplicatedStorage(), false, false);
execPlan.getTopFragment().setSink(tableSink);
return execPlan;
}
public static TupleDescriptor buildTupleDesc(ExecPlan execPlan, Table table) {
DescriptorTable descriptorTable = execPlan.getDescTbl();
TupleDescriptor olapTuple = descriptorTable.createTupleDescriptor();
for (Column column : table.getFullSchema()) {
SlotDescriptor slotDescriptor = descriptorTable.addSlotDescriptor(olapTuple);
slotDescriptor.setIsMaterialized(true);
slotDescriptor.setType(column.getType());
slotDescriptor.setColumn(column);
slotDescriptor.setIsNullable(column.isAllowNull());
}
olapTuple.computeMemLayout();
return olapTuple;
}
private static void createOutputFragment(PlanFragment inputFragment, ExecPlan execPlan,
List<ColumnRefOperator> outputColumns,
boolean hasOutputFragment) {
if (inputFragment.getPlanRoot() instanceof ExchangeNode || !inputFragment.isPartitioned() ||
!hasOutputFragment) {
List<Expr> outputExprs = outputColumns.stream().map(variable -> ScalarOperatorToExpr
.buildExecExpression(variable,
new ScalarOperatorToExpr.FormatterContext(execPlan.getColRefToExpr()))
).collect(Collectors.toList());
inputFragment.setOutputExprs(outputExprs);
execPlan.getOutputExprs().addAll(outputExprs);
return;
}
List<Expr> outputExprs = outputColumns.stream().map(variable -> ScalarOperatorToExpr
.buildExecExpression(variable, new ScalarOperatorToExpr.FormatterContext(execPlan.getColRefToExpr())))
.collect(Collectors.toList());
execPlan.getOutputExprs().addAll(outputExprs);
if (!enableComputeNode(execPlan)
&& !inputFragment.hashLocalBucketShuffleRightOrFullJoin(inputFragment.getPlanRoot())
&& execPlan.getScanNodes().stream().allMatch(d -> d instanceof OlapScanNode)
&& execPlan.getScanNodes().stream().map(d -> ((OlapScanNode) d).getScanTabletIds().size())
.reduce(Integer::sum).orElse(2) <= 1) {
inputFragment.setOutputExprs(outputExprs);
return;
}
ExchangeNode exchangeNode =
new ExchangeNode(execPlan.getNextNodeId(), inputFragment.getPlanRoot(), DataPartition.UNPARTITIONED);
exchangeNode.setNumInstances(1);
PlanFragment exchangeFragment =
new PlanFragment(execPlan.getNextFragmentId(), exchangeNode, DataPartition.UNPARTITIONED);
inputFragment.setDestination(exchangeNode);
inputFragment.setOutputPartition(DataPartition.UNPARTITIONED);
exchangeFragment.setOutputExprs(outputExprs);
execPlan.getFragments().add(exchangeFragment);
}
private static boolean useQueryCache(ExecPlan execPlan) {
if (!execPlan.getConnectContext().getSessionVariable().isEnableQueryCache()) {
return false;
}
return true;
}
    /**
     * Finalizes all fragments of the plan: creates data sinks, orders the fragment list,
     * computes local runtime-filter waiting sets, and applies query-cache normalization
     * or runtime adaptive DOP when the corresponding session features are enabled.
     *
     * @param execPlan       the plan whose fragments are finalized (returned for chaining)
     * @param resultSinkType the sink type to create on each fragment
     */
    private static ExecPlan finalizeFragments(ExecPlan execPlan, TResultSinkType resultSinkType) {
        List<PlanFragment> fragments = execPlan.getFragments();
        for (PlanFragment fragment : fragments) {
            fragment.createDataSink(resultSinkType);
        }
        // Fragments were appended during bottom-up translation; reverse the list in place.
        Collections.reverse(fragments);
        // Local runtime filters must be cleared when the pipeline engine runs with global
        // runtime filters disabled. ConnectContext.get() is thread-local and may be null
        // (e.g. background tasks), hence the null guard.
        boolean shouldClearRuntimeFilters = ConnectContext.get() != null &&
                !ConnectContext.get().getSessionVariable().getEnableGlobalRuntimeFilter() &&
                ConnectContext.get().getSessionVariable().isEnablePipelineEngine();
        for (PlanFragment fragment : fragments) {
            fragment.computeLocalRfWaitingSet(fragment.getPlanRoot(), shouldClearRuntimeFilters);
        }
        if (useQueryCache(execPlan)) {
            // Query cache requires normalized (canonical) fragments to compute cache keys.
            for (PlanFragment fragment : execPlan.getFragments()) {
                FragmentNormalizer normalizer = new FragmentNormalizer(execPlan, fragment);
                normalizer.normalize();
            }
        } else if (ConnectContext.get() != null &&
                ConnectContext.get().getSessionVariable().isEnableRuntimeAdaptiveDop()) {
            // Adaptive DOP and query-cache normalization are mutually exclusive here.
            for (PlanFragment fragment : fragments) {
                if (fragment.canUseRuntimeAdaptiveDop()) {
                    fragment.enableAdaptiveDop();
                }
            }
        }
        return execPlan;
    }
private static void maybeClearOlapScanNodePartitions(PlanFragment fragment) {
List<OlapScanNode> olapScanNodes = fragment.collectOlapScanNodes();
long numNodesWithBucketColumns =
olapScanNodes.stream().filter(node -> !node.getBucketColumns().isEmpty()).count();
boolean needClear = numNodesWithBucketColumns > 0 && numNodesWithBucketColumns < olapScanNodes.size();
if (needClear) {
clearOlapScanNodePartitions(fragment.getPlanRoot());
}
}
/**
* Clear partitionExprs of OlapScanNode (the bucket keys to pass to BE).
* <p>
* When partitionExprs of OlapScanNode are passed to BE, the post operators will use them as
* local shuffle partition exprs.
* Otherwise, the operators will use the original partition exprs (group by keys or join on keys).
* <p>
* The bucket keys can satisfy the required hash property of blocking aggregation except two scenarios:
* - OlapScanNode only has one tablet after pruned.
* - It is executed on the single BE.
* As for these two scenarios, which will generate ScanNode(k1)->LocalShuffle(c1)->BlockingAgg(c1),
* partitionExprs of OlapScanNode must be cleared to make BE use group by keys not bucket keys as
* local shuffle partition exprs.
*
* @param root The root node of the fragment which need to check whether to clear bucket keys of OlapScanNode.
*/
private static void clearOlapScanNodePartitions(PlanNode root) {
if (root instanceof OlapScanNode) {
OlapScanNode scanNode = (OlapScanNode) root;
scanNode.setBucketExprs(Lists.newArrayList());
scanNode.setBucketColumns(Lists.newArrayList());
return;
}
if (root instanceof ExchangeNode) {
return;
}
for (PlanNode child : root.getChildren()) {
clearOlapScanNodePartitions(child);
}
}
    /**
     * Visitor that translates an optimized physical operator tree (OptExpression) into an
     * executable PlanFragment tree, registering fragments, scan nodes, slots, and exprs
     * into the supplied {@link ExecPlan} as it walks the tree bottom-up.
     */
    private static class PhysicalPlanTranslator extends OptExpressionVisitor<PlanFragment, ExecPlan> {
        private final ColumnRefFactory columnRefFactory;
        // Allocates unique ids for runtime filters created during translation.
        private final IdGenerator<RuntimeFilterId> runtimeFilterIdIdGenerator = RuntimeFilterId.createGenerator();
        // Stays true only while every visited operator has at most one child (see visit());
        // a multi-child operator anywhere in the tree permanently flips it to false.
        private boolean canUseLocalShuffleAgg = true;
        public PhysicalPlanTranslator(ColumnRefFactory columnRefFactory) {
            this.columnRefFactory = columnRefFactory;
        }
        /**
         * Entry point: translates the root OptExpression by dispatching through the
         * visitor, returning the topmost PlanFragment.
         */
        public PlanFragment translate(OptExpression optExpression, ExecPlan context) {
            return visit(optExpression, context);
        }
@Override
public PlanFragment visit(OptExpression optExpression, ExecPlan context) {
canUseLocalShuffleAgg &= optExpression.arity() <= 1;
PlanFragment fragment = optExpression.getOp().accept(this, optExpression, context);
Projection projection = (optExpression.getOp()).getProjection();
if (projection == null) {
return fragment;
} else {
return buildProjectNode(optExpression, projection, fragment, context);
}
}
        /**
         * Marks scan output columns that are only referenced by simple ("single") predicates
         * — and are neither query outputs nor referenced by complex predicates — as unused,
         * so the storage layer can skip materializing them after predicate evaluation.
         * Disabled by the session variable enable_filter_unused_columns_in_scan_stage.
         *
         * @param node           the physical olap scan operator
         * @param scanNode       the exec scan node that receives the unused-column set
         * @param predicates     the scan's conjuncts (already extracted)
         * @param referenceTable the olap table being scanned
         */
        private void setUnUsedOutputColumns(PhysicalOlapScanOperator node, OlapScanNode scanNode,
                                            List<ScalarOperator> predicates, OlapTable referenceTable) {
            if (!ConnectContext.get().getSessionVariable().isEnableFilterUnusedColumnsInScanStage()) {
                return;
            }
            // Aggregation-family indexes without pre-aggregation need all columns; bail out.
            MaterializedIndexMeta materializedIndexMeta =
                    referenceTable.getIndexMetaByIndexId(node.getSelectedIndexId());
            if (materializedIndexMeta.getKeysType().isAggregationFamily() && !node.isPreAggregation()) {
                return;
            }
            List<ColumnRefOperator> outputColumns = node.getOutputColumns();
            // No output columns (e.g. count(*)-style scans): nothing to prune.
            if (outputColumns.isEmpty()) {
                return;
            }
            Set<Integer> outputColumnIds = new HashSet<Integer>();
            for (ColumnRefOperator colref : outputColumns) {
                outputColumnIds.add(colref.getId());
            }
            // Partition predicate column ids by predicate complexity: simple strict predicates
            // can be evaluated without materializing the column downstream; complex ones cannot.
            Set<Integer> singlePredColumnIds = new HashSet<Integer>();
            Set<Integer> complexPredColumnIds = new HashSet<Integer>();
            // Value columns of AGG/PRIMARY-KEY tables are reported alongside, since they
            // need special handling in the storage layer.
            Set<String> aggOrPrimaryKeyTableValueColumnNames = new HashSet<String>();
            if (materializedIndexMeta.getKeysType().isAggregationFamily() ||
                    materializedIndexMeta.getKeysType() == KeysType.PRIMARY_KEYS) {
                aggOrPrimaryKeyTableValueColumnNames =
                        materializedIndexMeta.getSchema().stream()
                                .filter(col -> !col.isKey())
                                .map(Column::getName)
                                .collect(Collectors.toSet());
            }
            for (ScalarOperator predicate : predicates) {
                ColumnRefSet usedColumns = predicate.getUsedColumns();
                if (DecodeVisitor.isSimpleStrictPredicate(predicate)) {
                    for (int cid : usedColumns.getColumnIds()) {
                        singlePredColumnIds.add(cid);
                    }
                } else {
                    for (int cid : usedColumns.getColumnIds()) {
                        complexPredColumnIds.add(cid);
                    }
                }
            }
            Set<Integer> unUsedOutputColumnIds = new HashSet<Integer>();
            Map<Integer, Integer> dictStringIdToIntIds = node.getDictStringIdToIntIds();
            for (Integer cid : singlePredColumnIds) {
                // Map a dict-encoded string column id to its int-code column id before the
                // membership checks, so pruning decisions are made on the encoded column.
                Integer newCid = cid;
                if (dictStringIdToIntIds.containsKey(cid)) {
                    newCid = dictStringIdToIntIds.get(cid);
                }
                // Unused = referenced only by simple predicates and not part of the output.
                if (!complexPredColumnIds.contains(newCid) && !outputColumnIds.contains(newCid)) {
                    unUsedOutputColumnIds.add(newCid);
                }
            }
            scanNode.setUnUsedOutputStringColumns(unUsedOutputColumnIds, aggOrPrimaryKeyTableValueColumnNames);
        }
@Override
public PlanFragment visitPhysicalProject(OptExpression optExpr, ExecPlan context) {
PhysicalProjectOperator node = (PhysicalProjectOperator) optExpr.getOp();
PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
Preconditions.checkState(!node.getColumnRefMap().isEmpty());
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
Map<SlotId, Expr> commonSubOperatorMap = Maps.newHashMap();
for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getCommonSubOperatorMap().entrySet()) {
Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(),
node.getCommonSubOperatorMap()));
commonSubOperatorMap.put(new SlotId(entry.getKey().getId()), expr);
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setIsNullable(expr.isNullable());
slotDescriptor.setIsMaterialized(false);
slotDescriptor.setType(expr.getType());
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
Map<SlotId, Expr> projectMap = Maps.newHashMap();
for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getColumnRefMap().entrySet()) {
Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(), node.getColumnRefMap()));
projectMap.put(new SlotId(entry.getKey().getId()), expr);
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setIsNullable(expr.isNullable());
slotDescriptor.setIsMaterialized(true);
slotDescriptor.setType(expr.getType());
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
ProjectNode projectNode =
new ProjectNode(context.getNextNodeId(),
tupleDescriptor,
inputFragment.getPlanRoot(),
projectMap,
commonSubOperatorMap);
projectNode.setHasNullableGenerateChild();
projectNode.computeStatistics(optExpr.getStatistics());
for (SlotId sid : projectMap.keySet()) {
SlotDescriptor slotDescriptor = tupleDescriptor.getSlot(sid.asInt());
slotDescriptor.setIsNullable(slotDescriptor.getIsNullable() | projectNode.isHasNullableGenerateChild());
}
tupleDescriptor.computeMemLayout();
projectNode.setLimit(inputFragment.getPlanRoot().getLimit());
inputFragment.setPlanRoot(projectNode);
return inputFragment;
}
public PlanFragment buildProjectNode(OptExpression optExpression, Projection node, PlanFragment inputFragment,
ExecPlan context) {
if (node == null) {
return inputFragment;
}
Preconditions.checkState(!node.getColumnRefMap().isEmpty());
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
Map<SlotId, Expr> commonSubOperatorMap = Maps.newHashMap();
for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getCommonSubOperatorMap().entrySet()) {
Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(),
node.getCommonSubOperatorMap()));
commonSubOperatorMap.put(new SlotId(entry.getKey().getId()), expr);
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setIsNullable(expr.isNullable());
slotDescriptor.setIsMaterialized(false);
slotDescriptor.setType(expr.getType());
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
Map<SlotId, Expr> projectMap = Maps.newHashMap();
for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getColumnRefMap().entrySet()) {
Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(), node.getColumnRefMap()));
projectMap.put(new SlotId(entry.getKey().getId()), expr);
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setIsNullable(expr.isNullable());
slotDescriptor.setIsMaterialized(true);
slotDescriptor.setType(expr.getType());
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
ProjectNode projectNode =
new ProjectNode(context.getNextNodeId(),
tupleDescriptor,
inputFragment.getPlanRoot(),
projectMap,
commonSubOperatorMap);
projectNode.setHasNullableGenerateChild();
Statistics statistics = optExpression.getStatistics();
Statistics.Builder b = Statistics.builder();
b.setOutputRowCount(statistics.getOutputRowCount());
b.addColumnStatisticsFromOtherStatistic(statistics, new ColumnRefSet(node.getOutputColumns()));
projectNode.computeStatistics(b.build());
for (SlotId sid : projectMap.keySet()) {
SlotDescriptor slotDescriptor = tupleDescriptor.getSlot(sid.asInt());
slotDescriptor.setIsNullable(slotDescriptor.getIsNullable() | projectNode.isHasNullableGenerateChild());
}
tupleDescriptor.computeMemLayout();
projectNode.setLimit(inputFragment.getPlanRoot().getLimit());
inputFragment.setPlanRoot(projectNode);
return inputFragment;
}
        /**
         * Translates a decode operator (low-cardinality dict decoding) into a DecodeNode.
         * Rebuilds the child's tuple layout: each dict-encoded slot is replaced by a new
         * VARCHAR slot holding the decoded string; all other slots are copied through.
         */
        @Override
        public PlanFragment visitPhysicalDecode(OptExpression optExpression, ExecPlan context) {
            PhysicalDecodeOperator node = (PhysicalDecodeOperator) optExpression.getOp();
            PlanFragment inputFragment = visit(optExpression.inputAt(0), context);
            TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
            for (TupleId tupleId : inputFragment.getPlanRoot().getTupleIds()) {
                TupleDescriptor childTuple = context.getDescTbl().getTupleDesc(tupleId);
                ArrayList<SlotDescriptor> slots = childTuple.getSlots();
                for (SlotDescriptor slot : slots) {
                    int slotId = slot.getId().asInt();
                    boolean isNullable = slot.getIsNullable();
                    if (node.getDictToStrings().containsKey(slotId)) {
                        // Dict-encoded slot: allocate a fresh VARCHAR slot under the string slot id.
                        Integer stringSlotId = node.getDictToStrings().get(slotId);
                        SlotDescriptor slotDescriptor =
                                context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(stringSlotId));
                        slotDescriptor.setIsNullable(isNullable);
                        slotDescriptor.setIsMaterialized(true);
                        slotDescriptor.setType(Type.VARCHAR);
                        context.getColRefToExpr().put(new ColumnRefOperator(stringSlotId, Type.VARCHAR,
                                        "<dict-code>", slotDescriptor.getIsNullable()),
                                new SlotRef(stringSlotId.toString(), slotDescriptor));
                    } else {
                        // Pass-through slot: copy the child's descriptor into the new tuple.
                        SlotDescriptor slotDescriptor = new SlotDescriptor(slot.getId(), tupleDescriptor, slot);
                        tupleDescriptor.addSlot(slotDescriptor);
                    }
                }
            }
            // Build exprs for the operator's string functions; each key must already be
            // registered in colRefToExpr by the loops above or by the child.
            Map<SlotId, Expr> projectMap = Maps.newHashMap();
            for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getStringFunctions().entrySet()) {
                Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
                        new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(),
                                node.getStringFunctions()));
                projectMap.put(new SlotId(entry.getKey().getId()), expr);
                Preconditions.checkState(context.getColRefToExpr().containsKey(entry.getKey()));
            }
            tupleDescriptor.computeMemLayout();
            DecodeNode decodeNode = new DecodeNode(context.getNextNodeId(),
                    tupleDescriptor,
                    inputFragment.getPlanRoot(),
                    node.getDictToStrings(), projectMap);
            decodeNode.computeStatistics(optExpression.getStatistics());
            decodeNode.setLimit(node.getLimit());
            inputFragment.setPlanRoot(decodeNode);
            return inputFragment;
        }
        /**
         * Translates an olap (native) table scan into an OlapScanNode fragment: resolves
         * selected partitions/tablets into scan-range locations, creates slots for the
         * referenced columns, attaches predicates, and wraps the node in a RANDOM-partitioned
         * fragment registered in the context.
         */
        @Override
        public PlanFragment visitPhysicalOlapScan(OptExpression optExpr, ExecPlan context) {
            PhysicalOlapScanOperator node = (PhysicalOlapScanOperator) optExpr.getOp();
            OlapTable referenceTable = (OlapTable) node.getTable();
            context.getDescTbl().addReferencedTable(referenceTable);
            TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
            tupleDescriptor.setTable(referenceTable);
            OlapScanNode scanNode = new OlapScanNode(context.getNextNodeId(), tupleDescriptor, "OlapScanNode");
            scanNode.setLimit(node.getLimit());
            scanNode.computeStatistics(optExpr.getStatistics());
            try {
                scanNode.updateScanInfo(node.getSelectedPartitionId(),
                        node.getSelectedTabletId(),
                        node.getSelectedIndexId());
                long selectedIndexId = node.getSelectedIndexId();
                long totalTabletsNum = 0;
                // When local replica selection is on, prefer replicas on this FE's host BE.
                long localBeId = -1;
                if (Config.enable_local_replica_selection) {
                    localBeId = GlobalStateMgr.getCurrentSystemInfo()
                            .getBackendIdByHost(FrontendOptions.getLocalHostAddress());
                }
                // Drop partitions whose tablet list is empty after pruning.
                List<Long> selectedNonEmptyPartitionIds = node.getSelectedPartitionId().stream().filter(p -> {
                    List<Long> selectTabletIds = scanNode.getPartitionToScanTabletMap().get(p);
                    return selectTabletIds != null && !selectTabletIds.isEmpty();
                }).collect(Collectors.toList());
                scanNode.setSelectedPartitionIds(selectedNonEmptyPartitionIds);
                for (Long partitionId : scanNode.getSelectedPartitionIds()) {
                    List<Long> selectTabletIds = scanNode.getPartitionToScanTabletMap().get(partitionId);
                    Preconditions.checkState(selectTabletIds != null && !selectTabletIds.isEmpty());
                    final Partition partition = referenceTable.getPartition(partitionId);
                    final MaterializedIndex selectedTable = partition.getIndex(selectedIndexId);
                    List<Long> allTabletIds = selectedTable.getTabletIdsInOrder();
                    // Map every tablet id of the index to its bucket sequence (position in order).
                    Map<Long, Integer> tabletId2BucketSeq = Maps.newHashMap();
                    for (int i = 0; i < allTabletIds.size(); i++) {
                        tabletId2BucketSeq.put(allTabletIds.get(i), i);
                    }
                    // Counts ALL tablets of the partition's index, not only the selected ones.
                    totalTabletsNum += selectedTable.getTablets().size();
                    scanNode.setTabletId2BucketSeq(tabletId2BucketSeq);
                    List<Tablet> tablets =
                            selectTabletIds.stream().map(selectedTable::getTablet).collect(Collectors.toList());
                    scanNode.addScanRangeLocations(partition, selectedTable, tablets, localBeId);
                }
                scanNode.setTotalTabletsNum(totalTabletsNum);
            } catch (UserException e) {
                throw new StarRocksPlannerException(
                        "Build Exec OlapScanNode fail, scan info is invalid," + e.getMessage(),
                        INTERNAL_ERROR);
            }
            // Create a slot per referenced column; for complex types, the optimizer's
            // column-ref type overrides the catalog column type.
            for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
                SlotDescriptor slotDescriptor =
                        context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
                slotDescriptor.setColumn(entry.getValue());
                slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
                slotDescriptor.setIsMaterialized(true);
                if (slotDescriptor.getOriginType().isComplexType()) {
                    slotDescriptor.setOriginType(entry.getKey().getType());
                    slotDescriptor.setType(entry.getKey().getType());
                }
                context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
            }
            scanNode.setColumnAccessPaths(node.getColumnAccessPaths());
            // Attach scan conjuncts and pruned-partition predicates as exec Exprs.
            List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
            ScalarOperatorToExpr.FormatterContext formatterContext =
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
            for (ScalarOperator predicate : predicates) {
                scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
            }
            for (ScalarOperator predicate : node.getPrunedPartitionPredicates()) {
                scanNode.getPrunedPartitionPredicates()
                        .add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
            }
            tupleDescriptor.computeMemLayout();
            // Let the storage layer skip columns only referenced by simple predicates.
            setUnUsedOutputColumns(node, scanNode, predicates, referenceTable);
            scanNode.setIsSortedByKeyPerTablet(node.needSortedByKeyPerTablet());
            scanNode.setIsPreAggregation(node.isPreAggregation(), node.getTurnOffReason());
            scanNode.setDictStringIdToIntIds(node.getDictStringIdToIntIds());
            scanNode.updateAppliedDictStringColumns(node.getGlobalDicts().stream().
                    map(entry -> entry.first).collect(Collectors.toSet()));
            scanNode.setUsePkIndex(node.isUsePkIndex());
            context.getScanNodes().add(scanNode);
            PlanFragment fragment =
                    new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.RANDOM);
            fragment.setQueryGlobalDicts(node.getGlobalDicts());
            context.getFragments().add(fragment);
            return fragment;
        }
@Override
public PlanFragment visitPhysicalMetaScan(OptExpression optExpression, ExecPlan context) {
PhysicalMetaScanOperator scan = (PhysicalMetaScanOperator) optExpression.getOp();
context.getDescTbl().addReferencedTable(scan.getTable());
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(scan.getTable());
MetaScanNode scanNode =
new MetaScanNode(context.getNextNodeId(),
tupleDescriptor, (OlapTable) scan.getTable(), scan.getAggColumnIdToNames());
scanNode.computeRangeLocations();
scanNode.computeStatistics(optExpression.getStatistics());
for (Map.Entry<ColumnRefOperator, Column> entry : scan.getColRefToColumnMetaMap().entrySet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setColumn(entry.getValue());
slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
slotDescriptor.setIsMaterialized(true);
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().getName(), slotDescriptor));
}
tupleDescriptor.computeMemLayout();
context.getScanNodes().add(scanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
private void prepareContextSlots(PhysicalScanOperator node, ExecPlan context, TupleDescriptor tupleDescriptor) {
for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setColumn(entry.getValue());
slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
slotDescriptor.setIsMaterialized(true);
if (slotDescriptor.getOriginType().isComplexType()) {
slotDescriptor.setOriginType(entry.getKey().getType());
slotDescriptor.setType(entry.getKey().getType());
}
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
}
private void prepareCommonExpr(HDFSScanNodePredicates scanNodePredicates,
ScanOperatorPredicates predicates, ExecPlan context) {
List<ScalarOperator> noEvalPartitionConjuncts = predicates.getNoEvalPartitionConjuncts();
List<ScalarOperator> nonPartitionConjuncts = predicates.getNonPartitionConjuncts();
List<ScalarOperator> partitionConjuncts = predicates.getPartitionConjuncts();
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator partitionConjunct : partitionConjuncts) {
scanNodePredicates.getPartitionConjuncts().
add(ScalarOperatorToExpr.buildExecExpression(partitionConjunct, formatterContext));
}
for (ScalarOperator noEvalPartitionConjunct : noEvalPartitionConjuncts) {
scanNodePredicates.getNoEvalPartitionConjuncts().
add(ScalarOperatorToExpr.buildExecExpression(noEvalPartitionConjunct, formatterContext));
}
for (ScalarOperator nonPartitionConjunct : nonPartitionConjuncts) {
scanNodePredicates.getNonPartitionConjuncts().
add(ScalarOperatorToExpr.buildExecExpression(nonPartitionConjunct, formatterContext));
}
}
private void prepareMinMaxExpr(HDFSScanNodePredicates scanNodePredicates,
ScanOperatorPredicates predicates, ExecPlan context) {
/*
* populates 'minMaxTuple' with slots for statistics values,
* and populates 'minMaxConjuncts' with conjuncts pointing into the 'minMaxTuple'
*/
List<ScalarOperator> minMaxConjuncts = predicates.getMinMaxConjuncts();
TupleDescriptor minMaxTuple = context.getDescTbl().createTupleDescriptor();
for (ScalarOperator minMaxConjunct : minMaxConjuncts) {
for (ColumnRefOperator columnRefOperator : Utils.extractColumnRef(minMaxConjunct)) {
SlotDescriptor slotDescriptor =
context.getDescTbl()
.addSlotDescriptor(minMaxTuple, new SlotId(columnRefOperator.getId()));
Column column = predicates.getMinMaxColumnRefMap().get(columnRefOperator);
slotDescriptor.setColumn(column);
slotDescriptor.setIsNullable(column.isAllowNull());
slotDescriptor.setIsMaterialized(true);
context.getColRefToExpr()
.put(columnRefOperator, new SlotRef(columnRefOperator.toString(), slotDescriptor));
}
}
minMaxTuple.computeMemLayout();
scanNodePredicates.setMinMaxTuple(minMaxTuple);
ScalarOperatorToExpr.FormatterContext minMaxFormatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator minMaxConjunct : minMaxConjuncts) {
scanNodePredicates.getMinMaxConjuncts().
add(ScalarOperatorToExpr.buildExecExpression(minMaxConjunct, minMaxFormatterContext));
}
}
@Override
public PlanFragment visitPhysicalHudiScan(OptExpression optExpression, ExecPlan context) {
PhysicalHudiScanOperator node = (PhysicalHudiScanOperator) optExpression.getOp();
ScanOperatorPredicates predicates = node.getScanOperatorPredicates();
Table referenceTable = node.getTable();
context.getDescTbl().addReferencedTable(referenceTable);
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(referenceTable);
prepareContextSlots(node, context, tupleDescriptor);
HudiScanNode hudiScanNode =
new HudiScanNode(context.getNextNodeId(), tupleDescriptor, "HudiScanNode");
hudiScanNode.computeStatistics(optExpression.getStatistics());
try {
HDFSScanNodePredicates scanNodePredicates = hudiScanNode.getScanNodePredicates();
scanNodePredicates.setSelectedPartitionIds(predicates.getSelectedPartitionIds());
scanNodePredicates.setIdToPartitionKey(predicates.getIdToPartitionKey());
hudiScanNode.setupScanRangeLocations(context.getDescTbl());
prepareCommonExpr(scanNodePredicates, predicates, context);
prepareMinMaxExpr(scanNodePredicates, predicates, context);
} catch (Exception e) {
LOG.warn("Hudi scan node get scan range locations failed : " + e);
LOG.warn(e);
throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
}
hudiScanNode.setLimit(node.getLimit());
tupleDescriptor.computeMemLayout();
context.getScanNodes().add(hudiScanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), hudiScanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalHiveScan(OptExpression optExpression, ExecPlan context) {
PhysicalHiveScanOperator node = (PhysicalHiveScanOperator) optExpression.getOp();
ScanOperatorPredicates predicates = node.getScanOperatorPredicates();
Table referenceTable = node.getTable();
context.getDescTbl().addReferencedTable(referenceTable);
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(referenceTable);
prepareContextSlots(node, context, tupleDescriptor);
HdfsScanNode hdfsScanNode =
new HdfsScanNode(context.getNextNodeId(), tupleDescriptor, "HdfsScanNode");
hdfsScanNode.computeStatistics(optExpression.getStatistics());
try {
HDFSScanNodePredicates scanNodePredicates = hdfsScanNode.getScanNodePredicates();
scanNodePredicates.setSelectedPartitionIds(predicates.getSelectedPartitionIds());
scanNodePredicates.setIdToPartitionKey(predicates.getIdToPartitionKey());
hdfsScanNode.setupScanRangeLocations(context.getDescTbl());
prepareCommonExpr(scanNodePredicates, predicates, context);
prepareMinMaxExpr(scanNodePredicates, predicates, context);
} catch (Exception e) {
LOG.warn("Hdfs scan node get scan range locations failed : " + e);
throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
}
hdfsScanNode.setLimit(node.getLimit());
tupleDescriptor.computeMemLayout();
context.getScanNodes().add(hdfsScanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), hdfsScanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalFileScan(OptExpression optExpression, ExecPlan context) {
PhysicalFileScanOperator node = (PhysicalFileScanOperator) optExpression.getOp();
ScanOperatorPredicates predicates = node.getScanOperatorPredicates();
Table referenceTable = node.getTable();
context.getDescTbl().addReferencedTable(referenceTable);
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(referenceTable);
prepareContextSlots(node, context, tupleDescriptor);
FileTableScanNode fileTableScanNode =
new FileTableScanNode(context.getNextNodeId(), tupleDescriptor, "FileTableScanNode");
fileTableScanNode.computeStatistics(optExpression.getStatistics());
try {
HDFSScanNodePredicates scanNodePredicates = fileTableScanNode.getScanNodePredicates();
fileTableScanNode.setupScanRangeLocations();
prepareCommonExpr(scanNodePredicates, predicates, context);
prepareMinMaxExpr(scanNodePredicates, predicates, context);
} catch (Exception e) {
LOG.warn("Hdfs scan node get scan range locations failed : ", e);
throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
}
fileTableScanNode.setLimit(node.getLimit());
tupleDescriptor.computeMemLayout();
context.getScanNodes().add(fileTableScanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), fileTableScanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalDeltaLakeScan(OptExpression optExpression, ExecPlan context) {
PhysicalDeltaLakeScanOperator node = (PhysicalDeltaLakeScanOperator) optExpression.getOp();
Table referenceTable = node.getTable();
context.getDescTbl().addReferencedTable(referenceTable);
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(referenceTable);
for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setColumn(entry.getValue());
slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
slotDescriptor.setIsMaterialized(true);
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
DeltaLakeScanNode deltaLakeScanNode =
new DeltaLakeScanNode(context.getNextNodeId(), tupleDescriptor, "DeltaLakeScanNode");
deltaLakeScanNode.computeStatistics(optExpression.getStatistics());
try {
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
for (ScalarOperator predicate : predicates) {
deltaLakeScanNode.getConjuncts()
.add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
deltaLakeScanNode.setupScanRangeLocations(context.getDescTbl());
HDFSScanNodePredicates scanNodePredicates = deltaLakeScanNode.getScanNodePredicates();
prepareMinMaxExpr(scanNodePredicates, node.getScanOperatorPredicates(), context);
} catch (AnalysisException e) {
LOG.warn("Delta lake scan node get scan range locations failed : " + e);
throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
}
deltaLakeScanNode.setLimit(node.getLimit());
tupleDescriptor.computeMemLayout();
context.getScanNodes().add(deltaLakeScanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), deltaLakeScanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
public PlanFragment visitPhysicalPaimonScan(OptExpression optExpression, ExecPlan context) {
PhysicalPaimonScanOperator node = (PhysicalPaimonScanOperator) optExpression.getOp();
ScanOperatorPredicates predicates = node.getScanOperatorPredicates();
Table referenceTable = node.getTable();
context.getDescTbl().addReferencedTable(referenceTable);
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(referenceTable);
prepareContextSlots(node, context, tupleDescriptor);
PaimonScanNode paimonScanNode =
new PaimonScanNode(context.getNextNodeId(), tupleDescriptor, "PaimonScanNode");
paimonScanNode.computeStatistics(optExpression.getStatistics());
try {
HDFSScanNodePredicates scanNodePredicates = paimonScanNode.getScanNodePredicates();
scanNodePredicates.setSelectedPartitionIds(predicates.getSelectedPartitionIds());
scanNodePredicates.setIdToPartitionKey(predicates.getIdToPartitionKey());
paimonScanNode.setupScanRangeLocations(context.getDescTbl());
prepareCommonExpr(scanNodePredicates, predicates, context);
prepareMinMaxExpr(scanNodePredicates, predicates, context);
} catch (Exception e) {
LOG.warn("Paimon scan node get scan range locations failed : " + e);
throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
}
paimonScanNode.setLimit(node.getLimit());
tupleDescriptor.computeMemLayout();
context.getScanNodes().add(paimonScanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), paimonScanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalIcebergScan(OptExpression optExpression, ExecPlan context) {
PhysicalIcebergScanOperator node = (PhysicalIcebergScanOperator) optExpression.getOp();
Table referenceTable = node.getTable();
context.getDescTbl().addReferencedTable(referenceTable);
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(referenceTable);
prepareContextSlots(node, context, tupleDescriptor);
IcebergScanNode icebergScanNode =
new IcebergScanNode(context.getNextNodeId(), tupleDescriptor, "IcebergScanNode");
icebergScanNode.computeStatistics(optExpression.getStatistics());
try {
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
for (ScalarOperator predicate : predicates) {
icebergScanNode.getConjuncts()
.add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
icebergScanNode.preProcessIcebergPredicate(node.getPredicate());
icebergScanNode.setupScanRangeLocations();
icebergScanNode.appendEqualityColumns(node, columnRefFactory, context);
HDFSScanNodePredicates scanNodePredicates = icebergScanNode.getScanNodePredicates();
prepareMinMaxExpr(scanNodePredicates, node.getScanOperatorPredicates(), context);
} catch (UserException e) {
LOG.warn("Iceberg scan node get scan range locations failed : " + e);
throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
}
icebergScanNode.setLimit(node.getLimit());
tupleDescriptor.computeMemLayout();
context.getScanNodes().add(icebergScanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), icebergScanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
        @Override
        public PlanFragment visitPhysicalSchemaScan(OptExpression optExpression, ExecPlan context) {
            PhysicalSchemaScanOperator node = (PhysicalSchemaScanOperator) optExpression.getOp();
            // Register the schema table and build its output tuple with one slot per referenced column.
            context.getDescTbl().addReferencedTable(node.getTable());
            TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
            tupleDescriptor.setTable(node.getTable());
            for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
                SlotDescriptor slotDescriptor =
                        context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
                slotDescriptor.setColumn(entry.getValue());
                slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
                slotDescriptor.setIsMaterialized(true);
                context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
            }
            tupleDescriptor.computeMemLayout();
            // The scan node carries frontend address and the requesting user so the backend can
            // fetch schema data and enforce visibility.
            SchemaScanNode scanNode = new SchemaScanNode(context.getNextNodeId(), tupleDescriptor);
            scanNode.setFrontendIP(FrontendOptions.getLocalHostAddress());
            scanNode.setFrontendPort(Config.rpc_port);
            scanNode.setUser(context.getConnectContext().getQualifiedUser());
            scanNode.setUserIp(context.getConnectContext().getRemoteIP());
            scanNode.setLimit(node.getLimit());
            List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
            ScalarOperatorToExpr.FormatterContext formatterContext =
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
            for (ScalarOperator predicate : predicates) {
                // Every conjunct is always evaluated on the scan node; in addition, simple
                // `column <op> constant` conjuncts are pushed down as scan parameters below.
                scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
                if (!(predicate.getChildren().size() == 2 &&
                        predicate.getChildren().get(0) instanceof ColumnRefOperator &&
                        predicate.getChildren().get(1) instanceof ConstantOperator)) {
                    continue;
                }
                ColumnRefOperator columnRefOperator = (ColumnRefOperator) predicate.getChildren().get(0);
                ConstantOperator constantOperator = (ConstantOperator) predicate.getChildren().get(1);
                if (predicate instanceof BinaryPredicateOperator) {
                    BinaryPredicateOperator binaryPredicateOperator = (BinaryPredicateOperator) predicate;
                    if (binaryPredicateOperator.getBinaryType() == BinaryType.EQ) {
                        // Equality constants become scan-node parameters keyed by column name,
                        // narrowing what the backend has to produce.
                        switch (columnRefOperator.getName()) {
                            case "TABLE_SCHEMA":
                            case "DATABASE_NAME":
                                scanNode.setSchemaDb(constantOperator.getVarchar());
                                break;
                            case "TABLE_NAME":
                                scanNode.setSchemaTable(constantOperator.getVarchar());
                                break;
                            case "BE_ID":
                                scanNode.setBeId(constantOperator.getBigint());
                                break;
                            case "TABLE_ID":
                                scanNode.setTableId(constantOperator.getBigint());
                                break;
                            case "PARTITION_ID":
                                scanNode.setPartitionId(constantOperator.getBigint());
                                break;
                            case "TABLET_ID":
                                scanNode.setTabletId(constantOperator.getBigint());
                                break;
                            case "TXN_ID":
                                scanNode.setTxnId(constantOperator.getBigint());
                                break;
                            case "LABEL":
                                scanNode.setLabel(constantOperator.getVarchar());
                                break;
                            case "JOB_ID":
                                scanNode.setJobId(constantOperator.getBigint());
                                break;
                            case "TYPE":
                                scanNode.setType(constantOperator.getVarchar());
                                break;
                            case "STATE":
                                scanNode.setState(constantOperator.getVarchar());
                                break;
                            case "LOG":
                                // Equality on LOG is translated into an exact-match regex.
                                scanNode.setLogPattern("^" + constantOperator.getVarchar() + "$");
                                break;
                            case "LEVEL":
                                scanNode.setLogLevel(constantOperator.getVarchar());
                                break;
                            default:
                                break;
                        }
                    }
                    // TIMESTAMP comparisons are converted into a half-open [startTs, endTs) window.
                    if (columnRefOperator.getName().equals("TIMESTAMP")) {
                        BinaryType opType = binaryPredicateOperator.getBinaryType();
                        if (opType == BinaryType.EQ) {
                            scanNode.setLogStartTs(constantOperator.getBigint());
                            scanNode.setLogEndTs(constantOperator.getBigint() + 1);
                        } else if (opType == BinaryType.GT) {
                            scanNode.setLogStartTs(constantOperator.getBigint() + 1);
                        } else if (opType == BinaryType.GE) {
                            scanNode.setLogStartTs(constantOperator.getBigint());
                        } else if (opType == BinaryType.LT) {
                            scanNode.setLogEndTs(constantOperator.getBigint());
                        } else if (opType == BinaryType.LE) {
                            scanNode.setLogEndTs(constantOperator.getBigint() + 1);
                        }
                    }
                } else if (predicate instanceof LikePredicateOperator) {
                    LikePredicateOperator like = (LikePredicateOperator) predicate;
                    // Only regexp/rlike can be pushed down as a log grep pattern; plain LIKE is rejected.
                    if (columnRefOperator.getName().equals("LOG")) {
                        if (like.getLikeType() == LikePredicateOperator.LikeType.REGEXP) {
                            scanNode.setLogPattern(((ConstantOperator) like.getChildren().get(1)).getVarchar());
                        } else {
                            throw UnsupportedException.unsupportedException(
                                    "only support `regexp` or `rlike` for log grep");
                        }
                    }
                }
            }
            // load_tracking_logs would otherwise scan unbounded history, so a label or job_id is required.
            if (scanNode.getTableName().equalsIgnoreCase("load_tracking_logs") && scanNode.getLabel() == null
                    && scanNode.getJobId() == null) {
                throw UnsupportedException.unsupportedException("load_tracking_logs must specify label or job_id");
            }
            // BE-side schema tables are scanned on every backend; FE-side tables run on a single instance.
            if (scanNode.isBeSchemaTable()) {
                scanNode.computeBeScanRanges();
            }
            // Cap the per-node grep amount; with no predicates the query limit can lower it further.
            if (scanNode.getLimit() > 0 && predicates.isEmpty()) {
                scanNode.setLogLimit(Math.min(scanNode.getLimit(), Config.max_per_node_grep_log_limit));
            } else {
                scanNode.setLogLimit(Config.max_per_node_grep_log_limit);
            }
            context.getScanNodes().add(scanNode);
            PlanFragment fragment = new PlanFragment(context.getNextFragmentId(), scanNode,
                    scanNode.isBeSchemaTable() ? DataPartition.RANDOM : DataPartition.UNPARTITIONED);
            context.getFragments().add(fragment);
            return fragment;
        }
@Override
public PlanFragment visitPhysicalMysqlScan(OptExpression optExpression, ExecPlan context) {
PhysicalMysqlScanOperator node = (PhysicalMysqlScanOperator) optExpression.getOp();
context.getDescTbl().addReferencedTable(node.getTable());
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(node.getTable());
for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setColumn(entry.getValue());
slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
slotDescriptor.setIsMaterialized(true);
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().getName(), slotDescriptor));
}
tupleDescriptor.computeMemLayout();
MysqlScanNode scanNode = new MysqlScanNode(context.getNextNodeId(), tupleDescriptor,
(MysqlTable) node.getTable());
if (node.getTemporalClause() != null) {
scanNode.setTemporalClause(node.getTemporalClause());
}
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator predicate : predicates) {
scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
scanNode.setLimit(node.getLimit());
scanNode.computeColumnsAndFilters();
scanNode.computeStatistics(optExpression.getStatistics());
context.getScanNodes().add(scanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.UNPARTITIONED);
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalEsScan(OptExpression optExpression, ExecPlan context) {
PhysicalEsScanOperator node = (PhysicalEsScanOperator) optExpression.getOp();
context.getDescTbl().addReferencedTable(node.getTable());
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(node.getTable());
for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setColumn(entry.getValue());
slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
slotDescriptor.setIsMaterialized(true);
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
tupleDescriptor.computeMemLayout();
EsScanNode scanNode = new EsScanNode(context.getNextNodeId(), tupleDescriptor, "EsScanNode");
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator predicate : predicates) {
scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
scanNode.setLimit(node.getLimit());
scanNode.computeStatistics(optExpression.getStatistics());
try {
scanNode.assignBackends();
} catch (UserException e) {
throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
}
scanNode.setShardScanRanges(scanNode.computeShardLocations(node.getSelectedIndex()));
context.getScanNodes().add(scanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalJDBCScan(OptExpression optExpression, ExecPlan context) {
PhysicalJDBCScanOperator node = (PhysicalJDBCScanOperator) optExpression.getOp();
context.getDescTbl().addReferencedTable(node.getTable());
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(node.getTable());
for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setColumn(entry.getValue());
slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
slotDescriptor.setIsMaterialized(true);
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().getName(), slotDescriptor));
}
tupleDescriptor.computeMemLayout();
JDBCScanNode scanNode = new JDBCScanNode(context.getNextNodeId(), tupleDescriptor,
(JDBCTable) node.getTable());
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator predicate : predicates) {
scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
scanNode.setLimit(node.getLimit());
scanNode.computeColumnsAndFilters();
scanNode.computeStatistics(optExpression.getStatistics());
context.getScanNodes().add(scanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.UNPARTITIONED);
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalValues(OptExpression optExpr, ExecPlan context) {
PhysicalValuesOperator valuesOperator = (PhysicalValuesOperator) optExpr.getOp();
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
for (ColumnRefOperator columnRefOperator : valuesOperator.getColumnRefSet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(columnRefOperator.getId()));
slotDescriptor.setIsNullable(columnRefOperator.isNullable());
slotDescriptor.setIsMaterialized(true);
slotDescriptor.setType(columnRefOperator.getType());
context.getColRefToExpr()
.put(columnRefOperator, new SlotRef(columnRefOperator.toString(), slotDescriptor));
}
tupleDescriptor.computeMemLayout();
if (valuesOperator.getRows().isEmpty()) {
EmptySetNode emptyNode = new EmptySetNode(context.getNextNodeId(),
Lists.newArrayList(tupleDescriptor.getId()));
emptyNode.computeStatistics(optExpr.getStatistics());
PlanFragment fragment = new PlanFragment(context.getNextFragmentId(), emptyNode,
DataPartition.UNPARTITIONED);
context.getFragments().add(fragment);
return fragment;
} else {
UnionNode unionNode = new UnionNode(context.getNextNodeId(), tupleDescriptor.getId());
unionNode.setLimit(valuesOperator.getLimit());
List<List<Expr>> consts = new ArrayList<>();
for (List<ScalarOperator> row : valuesOperator.getRows()) {
List<Expr> exprRow = new ArrayList<>();
for (ScalarOperator field : row) {
exprRow.add(ScalarOperatorToExpr.buildExecExpression(
field, new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())));
}
consts.add(exprRow);
}
unionNode.setMaterializedConstExprLists_(consts);
unionNode.computeStatistics(optExpr.getStatistics());
/*
* TODO(lhy):
* It doesn't make sense for vectorized execution engines, but it will appear in explain.
* we can delete this when refactoring explain in the future,
*/
consts.forEach(unionNode::addConstExprList);
PlanFragment fragment = new PlanFragment(context.getNextFragmentId(), unionNode,
DataPartition.UNPARTITIONED);
context.getFragments().add(fragment);
return fragment;
}
}
public static boolean hasNoExchangeNodes(PlanNode root) {
if (root instanceof ExchangeNode) {
return false;
}
for (PlanNode childNode : root.getChildren()) {
if (!hasNoExchangeNodes(childNode)) {
return false;
}
}
return true;
}
/**
* Whether all the nodes of the plan tree only contain the specific node types.
*
* @param root The plan tree root.
* @param requiredNodeTypes The specific node type.
* @return true if all the nodes belong to the node types, otherwise false.
*/
private boolean onlyContainNodeTypes(PlanNode root, List<Class<? extends PlanNode>> requiredNodeTypes) {
boolean rootMatched = requiredNodeTypes.stream().anyMatch(type -> type.isInstance(root));
if (!rootMatched) {
return false;
}
for (PlanNode child : root.getChildren()) {
if (!onlyContainNodeTypes(child, requiredNodeTypes)) {
return false;
}
}
return true;
}
/**
* Remove ExchangeNode between AggNode and ScanNode for the single backend.
* <p>
* This is used to generate "ScanNode->LocalShuffle->OnePhaseLocalAgg" for the single backend,
* which contains two steps:
* 1. Ignore the network cost for ExchangeNode when estimating cost model.
* 2. Remove ExchangeNode between AggNode and ScanNode when building fragments.
* <p>
* Specifically, transfer
* (AggNode->ExchangeNode)->([ProjectNode->]ScanNode)
* - *inputFragment sourceFragment
* to
* (AggNode->[ProjectNode->]ScanNode)
* - *sourceFragment
* That is, when matching this fragment pattern, remove inputFragment and return sourceFragment.
*
* @param inputFragment The input fragment to match the above pattern.
* @param context The context of building fragment, which contains all the fragments.
* @return SourceFragment if it matches th pattern, otherwise the original inputFragment.
*/
private PlanFragment removeExchangeNodeForLocalShuffleAgg(PlanFragment inputFragment, ExecPlan context) {
if (ConnectContext.get() == null) {
return inputFragment;
}
if (!canUseLocalShuffleAgg) {
return inputFragment;
}
SessionVariable sessionVariable = ConnectContext.get().getSessionVariable();
boolean enableLocalShuffleAgg = sessionVariable.isEnableLocalShuffleAgg()
&& sessionVariable.isEnablePipelineEngine()
&& GlobalStateMgr.getCurrentSystemInfo().isSingleBackendAndComputeNode();
if (!enableLocalShuffleAgg) {
return inputFragment;
}
if (!(inputFragment.getPlanRoot() instanceof ExchangeNode)) {
return inputFragment;
}
PlanNode sourceFragmentRoot = inputFragment.getPlanRoot().getChild(0);
if (!onlyContainNodeTypes(sourceFragmentRoot, ImmutableList.of(ScanNode.class, ProjectNode.class))) {
return inputFragment;
}
PlanFragment sourceFragment = sourceFragmentRoot.getFragment();
if (sourceFragment instanceof MultiCastPlanFragment) {
return inputFragment;
}
ArrayList<PlanFragment> fragments = context.getFragments();
for (int i = fragments.size() - 1; i >= 0; --i) {
if (fragments.get(i).equals(inputFragment)) {
fragments.remove(i);
break;
}
}
sourceFragment.clearDestination();
sourceFragment.clearOutputPartition();
return sourceFragment;
}
        // Value holder for the expression lists produced by buildAggregateTuple():
        // group-by exprs, aggregate function calls, shuffle partition exprs, and the
        // slot refs pointing at the intermediate aggregate tuple.
        private static class AggregateExprInfo {
            public final ArrayList<Expr> groupExpr;
            public final ArrayList<FunctionCallExpr> aggregateExpr;
            public final ArrayList<Expr> partitionExpr;
            public final ArrayList<Expr> intermediateExpr;

            public AggregateExprInfo(ArrayList<Expr> groupExpr, ArrayList<FunctionCallExpr> aggregateExpr,
                                     ArrayList<Expr> partitionExpr,
                                     ArrayList<Expr> intermediateExpr) {
                this.groupExpr = groupExpr;
                this.aggregateExpr = aggregateExpr;
                this.partitionExpr = partitionExpr;
                this.intermediateExpr = intermediateExpr;
            }
        }
        // Builds the aggregate output tuple (slots for group-bys, aggregate results and
        // partition-by columns) and translates the optimizer expressions into exec exprs.
        // Returns all four expression lists bundled in an AggregateExprInfo.
        private AggregateExprInfo buildAggregateTuple(
                Map<ColumnRefOperator, CallOperator> aggregations,
                List<ColumnRefOperator> groupBys,
                List<ColumnRefOperator> partitionBys,
                TupleDescriptor outputTupleDesc,
                ExecPlan context) {
            ArrayList<Expr> groupingExpressions = Lists.newArrayList();
            // Special case: exchange_bytes/exchange_speed at agg stage 1 skip materializing
            // group-by slots (grouping exprs stay empty for the tuple).
            boolean forExchangePerf = aggregations.values().stream().anyMatch(aggFunc ->
                    aggFunc.getFnName().equals(FunctionSet.EXCHANGE_BYTES) ||
                            aggFunc.getFnName().equals(FunctionSet.EXCHANGE_SPEED)) &&
                    ConnectContext.get().getSessionVariable().getNewPlannerAggStage() == 1;
            if (!forExchangePerf) {
                // One slot per group-by column, keyed by the column ref id.
                for (ColumnRefOperator grouping : CollectionUtils.emptyIfNull(groupBys)) {
                    Expr groupingExpr = ScalarOperatorToExpr.buildExecExpression(grouping,
                            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));
                    groupingExpressions.add(groupingExpr);
                    SlotDescriptor slotDesc =
                            context.getDescTbl().addSlotDescriptor(outputTupleDesc, new SlotId(grouping.getId()));
                    slotDesc.setType(groupingExpr.getType());
                    slotDesc.setIsNullable(groupingExpr.isNullable());
                    slotDesc.setIsMaterialized(true);
                }
            }
            ArrayList<FunctionCallExpr> aggregateExprList = Lists.newArrayList();
            ArrayList<Expr> intermediateAggrExprs = Lists.newArrayList();
            for (Map.Entry<ColumnRefOperator, CallOperator> aggregation : aggregations.entrySet()) {
                FunctionCallExpr aggExpr = (FunctionCallExpr) ScalarOperatorToExpr.buildExecExpression(
                        aggregation.getValue(), new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));
                aggregateExprList.add(aggExpr);
                // Output slot typed with the aggregate's final result type.
                SlotDescriptor slotDesc = context.getDescTbl()
                        .addSlotDescriptor(outputTupleDesc, new SlotId(aggregation.getKey().getId()));
                slotDesc.setType(aggregation.getValue().getType());
                slotDesc.setIsNullable(aggExpr.isNullable());
                slotDesc.setIsMaterialized(true);
                context.getColRefToExpr()
                        .put(aggregation.getKey(), new SlotRef(aggregation.getKey().toString(), slotDesc));
                // A parallel slot (same SlotId) typed with the aggregate's intermediate state type,
                // falling back to the return type when no intermediate type is declared.
                SlotDescriptor intermediateSlotDesc = new SlotDescriptor(slotDesc.getId(), slotDesc.getParent());
                AggregateFunction aggrFn = (AggregateFunction) aggExpr.getFn();
                Type intermediateType = aggrFn.getIntermediateType() != null ?
                        aggrFn.getIntermediateType() : aggrFn.getReturnType();
                intermediateSlotDesc.setType(intermediateType);
                intermediateSlotDesc.setIsNullable(aggrFn.isNullable());
                intermediateSlotDesc.setIsMaterialized(true);
                SlotRef intermediateSlotRef = new SlotRef(aggregation.getKey().toString(), intermediateSlotDesc);
                intermediateAggrExprs.add(intermediateSlotRef);
            }
            // Partition-by columns (e.g. for shuffling the local aggregation output).
            ArrayList<Expr> partitionExpressions = Lists.newArrayList();
            for (ColumnRefOperator column : CollectionUtils.emptyIfNull(partitionBys)) {
                Expr partitionExpr = ScalarOperatorToExpr.buildExecExpression(column,
                        new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));
                SlotDescriptor slotDesc =
                        context.getDescTbl().addSlotDescriptor(outputTupleDesc, new SlotId(column.getId()));
                slotDesc.setType(partitionExpr.getType());
                slotDesc.setIsNullable(partitionExpr.isNullable());
                slotDesc.setIsMaterialized(true);
                context.getColRefToExpr().put(column, new SlotRef(column.toString(), slotDesc));
                partitionExpressions.add(new SlotRef(slotDesc));
            }
            outputTupleDesc.computeMemLayout();
            return new AggregateExprInfo(groupingExpressions, aggregateExprList, partitionExpressions,
                    intermediateAggrExprs);
        }
        @Override
        public PlanFragment visitPhysicalHashAggregate(OptExpression optExpr, ExecPlan context) {
            PhysicalHashAggregateOperator node = (PhysicalHashAggregateOperator) optExpr.getOp();
            PlanFragment originalInputFragment = visit(optExpr.inputAt(0), context);
            // May drop the exchange below the agg on a single backend; if it did,
            // the aggregation runs with a local shuffle instead.
            PlanFragment inputFragment = removeExchangeNodeForLocalShuffleAgg(originalInputFragment, context);
            boolean withLocalShuffle = inputFragment != originalInputFragment;
            Map<ColumnRefOperator, CallOperator> aggregations = node.getAggregations();
            List<ColumnRefOperator> groupBys = node.getGroupBys();
            List<ColumnRefOperator> partitionBys = node.getPartitionByColumns();
            TupleDescriptor outputTupleDesc = context.getDescTbl().createTupleDescriptor();
            AggregateExprInfo aggExpr =
                    buildAggregateTuple(aggregations, groupBys, partitionBys, outputTupleDesc, context);
            ArrayList<Expr> groupingExpressions = aggExpr.groupExpr;
            ArrayList<FunctionCallExpr> aggregateExprList = aggExpr.aggregateExpr;
            ArrayList<Expr> partitionExpressions = aggExpr.partitionExpr;
            ArrayList<Expr> intermediateAggrExprs = aggExpr.intermediateExpr;
            AggregationNode aggregationNode;
            if (node.getType().isLocal() && node.isSplit()) {
                // First (local) phase of a split aggregation: emits the intermediate tuple,
                // not finalized, optionally streaming pre-agg.
                AggregateInfo aggInfo = AggregateInfo.create(
                        groupingExpressions,
                        aggregateExprList,
                        outputTupleDesc, outputTupleDesc,
                        AggregateInfo.AggPhase.FIRST);
                aggregationNode =
                        new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(), aggInfo);
                aggregationNode.unsetNeedsFinalize();
                aggregationNode.setIsPreagg(node.isUseStreamingPreAgg());
                aggregationNode.setIntermediateTuple();
                if (!partitionExpressions.isEmpty()) {
                    inputFragment.setOutputPartition(DataPartition.hashPartitioned(partitionExpressions));
                }
                if (!withLocalShuffle && !node.isUseStreamingPreAgg() &&
                        hasColocateOlapScanChildInFragment(aggregationNode)) {
                    aggregationNode.setColocate(true);
                }
            } else if (node.getType().isGlobal() || (node.getType().isLocal() && !node.isSplit())) {
                // Final (global) phase, or a one-phase (unsplit local) aggregation.
                if (node.hasSingleDistinct()) {
                    // All non-distinct aggregates merge intermediate state; the distinct one does not.
                    for (int i = 0; i < aggregateExprList.size(); i++) {
                        if (i != node.getSingleDistinctFunctionPos()) {
                            aggregateExprList.get(i).setMergeAggFn();
                        }
                    }
                    AggregateInfo aggInfo = AggregateInfo.create(
                            groupingExpressions,
                            aggregateExprList,
                            outputTupleDesc, outputTupleDesc,
                            AggregateInfo.AggPhase.SECOND);
                    aggregationNode =
                            new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(),
                                    aggInfo);
                } else if (!node.isSplit()) {
                    // One-phase aggregation: rewrite a single count/sum(distinct) into multi_distinct_*.
                    rewriteAggDistinctFirstStageFunction(aggregateExprList);
                    AggregateInfo aggInfo = AggregateInfo.create(
                            groupingExpressions,
                            aggregateExprList,
                            outputTupleDesc, outputTupleDesc,
                            AggregateInfo.AggPhase.FIRST);
                    aggregationNode =
                            new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(),
                                    aggInfo);
                } else {
                    // Second phase of a two-phase aggregation: every aggregate merges.
                    aggregateExprList.forEach(FunctionCallExpr::setMergeAggFn);
                    AggregateInfo aggInfo = AggregateInfo.create(
                            groupingExpressions,
                            aggregateExprList,
                            outputTupleDesc, outputTupleDesc,
                            AggregateInfo.AggPhase.SECOND_MERGE);
                    aggregationNode =
                            new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(),
                                    aggInfo);
                }
                // The HAVING-style predicate and limit apply only on the final phase.
                List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
                ScalarOperatorToExpr.FormatterContext formatterContext =
                        new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
                for (ScalarOperator predicate : predicates) {
                    aggregationNode.getConjuncts()
                            .add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
                }
                aggregationNode.setLimit(node.getLimit());
                if (!withLocalShuffle && hasColocateOlapScanChildInFragment(aggregationNode)) {
                    aggregationNode.setColocate(true);
                }
            } else if (node.getType().isDistinctGlobal()) {
                // Middle phase of a multi-phase distinct aggregation: merge, still not finalized.
                aggregateExprList.forEach(FunctionCallExpr::setMergeAggFn);
                AggregateInfo aggInfo = AggregateInfo.create(
                        groupingExpressions,
                        aggregateExprList,
                        outputTupleDesc, outputTupleDesc,
                        AggregateInfo.AggPhase.FIRST_MERGE);
                aggregationNode =
                        new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(), aggInfo);
                aggregationNode.unsetNeedsFinalize();
                aggregationNode.setIntermediateTuple();
                if (!withLocalShuffle && hasColocateOlapScanChildInFragment(aggregationNode)) {
                    aggregationNode.setColocate(true);
                }
            } else if (node.getType().isDistinctLocal()) {
                // Local phase of a distinct aggregation: merge everything except the distinct function.
                for (int i = 0; i < aggregateExprList.size(); i++) {
                    if (i != node.getSingleDistinctFunctionPos()) {
                        aggregateExprList.get(i).setMergeAggFn();
                    }
                }
                AggregateInfo aggInfo = AggregateInfo.create(
                        groupingExpressions,
                        aggregateExprList,
                        outputTupleDesc, outputTupleDesc,
                        AggregateInfo.AggPhase.SECOND);
                aggregationNode =
                        new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(), aggInfo);
                aggregationNode.unsetNeedsFinalize();
                aggregationNode.setIsPreagg(node.isUseStreamingPreAgg());
                aggregationNode.setIntermediateTuple();
            } else {
                throw unsupportedException("Not support aggregate type : " + node.getType());
            }
            aggregationNode.setUseSortAgg(node.isUseSortAgg());
            aggregationNode.setStreamingPreaggregationMode(context.getConnectContext().
                    getSessionVariable().getStreamingPreaggregationMode());
            aggregationNode.setHasNullableGenerateChild();
            aggregationNode.computeStatistics(optExpr.getStatistics());
            // For one-phase-style aggregations, record whether a local shuffle is used so
            // scan-range assignment per driver sequence can be toggled accordingly.
            if (node.isOnePhaseAgg() || node.isMergedLocalAgg() || node.getType().isDistinctGlobal()) {
                inputFragment.setAssignScanRangesPerDriverSeq(!withLocalShuffle);
                aggregationNode.setWithLocalShuffle(withLocalShuffle);
                aggregationNode.setIdenticallyDistributed(true);
            }
            aggregationNode.getAggInfo().setIntermediateAggrExprs(intermediateAggrExprs);
            inputFragment.setPlanRoot(aggregationNode);
            return inputFragment;
        }
public boolean hasColocateOlapScanChildInFragment(PlanNode node) {
if (node instanceof OlapScanNode) {
ColocateTableIndex colocateIndex = GlobalStateMgr.getCurrentColocateIndex();
OlapScanNode scanNode = (OlapScanNode) node;
if (colocateIndex.isColocateTable(scanNode.getOlapTable().getId())) {
return true;
}
}
if (node instanceof ExchangeNode) {
return false;
}
boolean hasOlapScanChild = false;
for (PlanNode child : node.getChildren()) {
hasOlapScanChild |= hasColocateOlapScanChildInFragment(child);
}
return hasOlapScanChild;
}
public void rewriteAggDistinctFirstStageFunction(List<FunctionCallExpr> aggregateExprList) {
int singleDistinctCount = 0;
int singleDistinctIndex = 0;
FunctionCallExpr functionCallExpr = null;
for (int i = 0; i < aggregateExprList.size(); ++i) {
FunctionCallExpr callExpr = aggregateExprList.get(i);
if (callExpr.isDistinct()) {
++singleDistinctCount;
functionCallExpr = callExpr;
singleDistinctIndex = i;
}
}
if (singleDistinctCount == 1) {
FunctionCallExpr replaceExpr = null;
final String functionName = functionCallExpr.getFnName().getFunction();
if (functionName.equalsIgnoreCase(FunctionSet.COUNT)) {
replaceExpr = new FunctionCallExpr(FunctionSet.MULTI_DISTINCT_COUNT, functionCallExpr.getParams());
replaceExpr.setFn(Expr.getBuiltinFunction(FunctionSet.MULTI_DISTINCT_COUNT,
new Type[] {functionCallExpr.getChild(0).getType()},
IS_NONSTRICT_SUPERTYPE_OF));
replaceExpr.getParams().setIsDistinct(false);
} else if (functionName.equalsIgnoreCase(FunctionSet.SUM)) {
replaceExpr = new FunctionCallExpr(FunctionSet.MULTI_DISTINCT_SUM, functionCallExpr.getParams());
Function multiDistinctSum = DecimalV3FunctionAnalyzer.convertSumToMultiDistinctSum(
functionCallExpr.getFn(), functionCallExpr.getChild(0).getType());
replaceExpr.setFn(multiDistinctSum);
replaceExpr.getParams().setIsDistinct(false);
}
Preconditions.checkState(replaceExpr != null);
ExpressionAnalyzer.analyzeExpressionIgnoreSlot(replaceExpr, ConnectContext.get());
aggregateExprList.set(singleDistinctIndex, replaceExpr);
}
}
@Override
public PlanFragment visitPhysicalDistribution(OptExpression optExpr, ExecPlan context) {
PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
PhysicalDistributionOperator distribution = (PhysicalDistributionOperator) optExpr.getOp();
ExchangeNode exchangeNode = new ExchangeNode(context.getNextNodeId(),
inputFragment.getPlanRoot(), distribution.getDistributionSpec().getType());
DataPartition dataPartition;
if (DistributionSpec.DistributionType.GATHER.equals(distribution.getDistributionSpec().getType())) {
exchangeNode.setNumInstances(1);
dataPartition = DataPartition.UNPARTITIONED;
GatherDistributionSpec spec = (GatherDistributionSpec) distribution.getDistributionSpec();
if (spec.hasLimit()) {
exchangeNode.setLimit(spec.getLimit());
}
} else if (DistributionSpec.DistributionType.BROADCAST
.equals(distribution.getDistributionSpec().getType())) {
exchangeNode.setNumInstances(inputFragment.getPlanRoot().getNumInstances());
dataPartition = DataPartition.UNPARTITIONED;
} else if (DistributionSpec.DistributionType.SHUFFLE.equals(distribution.getDistributionSpec().getType())) {
exchangeNode.setNumInstances(inputFragment.getPlanRoot().getNumInstances());
List<ColumnRefOperator> partitionColumns =
getShuffleColumns((HashDistributionSpec) distribution.getDistributionSpec());
List<Expr> distributeExpressions =
partitionColumns.stream().map(e -> ScalarOperatorToExpr.buildExecExpression(e,
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
.collect(Collectors.toList());
dataPartition = DataPartition.hashPartitioned(distributeExpressions);
} else {
throw new StarRocksPlannerException("Unsupport exchange type : "
+ distribution.getDistributionSpec().getType(), INTERNAL_ERROR);
}
exchangeNode.setDataPartition(dataPartition);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), exchangeNode, dataPartition);
fragment.setQueryGlobalDicts(distribution.getGlobalDicts());
inputFragment.setDestination(exchangeNode);
inputFragment.setOutputPartition(dataPartition);
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalTopN(OptExpression optExpr, ExecPlan context) {
PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
PhysicalTopNOperator topN = (PhysicalTopNOperator) optExpr.getOp();
Preconditions.checkState(topN.getOffset() >= 0);
if (!topN.isSplit()) {
return buildPartialTopNFragment(optExpr, context, topN.getPartitionByColumns(),
topN.getPartitionLimit(), topN.getOrderSpec(),
topN.getTopNType(), topN.getLimit(), topN.getOffset(), inputFragment);
} else {
return buildFinalTopNFragment(context, topN.getTopNType(), topN.getLimit(), topN.getOffset(),
inputFragment, optExpr);
}
}
private PlanFragment buildFinalTopNFragment(ExecPlan context, TopNType topNType, long limit, long offset,
PlanFragment inputFragment,
OptExpression optExpr) {
ExchangeNode exchangeNode = new ExchangeNode(context.getNextNodeId(),
inputFragment.getPlanRoot(),
DistributionSpec.DistributionType.GATHER);
exchangeNode.setNumInstances(1);
DataPartition dataPartition = DataPartition.UNPARTITIONED;
exchangeNode.setDataPartition(dataPartition);
Preconditions.checkState(inputFragment.getPlanRoot() instanceof SortNode);
SortNode sortNode = (SortNode) inputFragment.getPlanRoot();
sortNode.setTopNType(topNType);
exchangeNode.setMergeInfo(sortNode.getSortInfo(), offset);
exchangeNode.computeStatistics(optExpr.getStatistics());
if (TopNType.ROW_NUMBER.equals(topNType)) {
exchangeNode.setLimit(limit);
} else {
exchangeNode.unsetLimit();
}
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), exchangeNode, dataPartition);
inputFragment.setDestination(exchangeNode);
inputFragment.setOutputPartition(dataPartition);
fragment.setQueryGlobalDicts(inputFragment.getQueryGlobalDicts());
context.getFragments().add(fragment);
return fragment;
}
/**
 * Builds the local (partial) phase of a TopN: a SortNode stacked on the child
 * fragment that sorts — and, when partition columns are present, applies a
 * per-partition limit to — rows before any final merge phase.
 */
private PlanFragment buildPartialTopNFragment(OptExpression optExpr, ExecPlan context,
                                              List<ColumnRefOperator> partitionByColumns, long partitionLimit,
                                              OrderSpec orderSpec, TopNType topNType, long limit, long offset,
                                              PlanFragment inputFragment) {
    List<Expr> resolvedTupleExprs = Lists.newArrayList();
    List<Expr> partitionExprs = Lists.newArrayList();
    List<Expr> sortExprs = Lists.newArrayList();
    TupleDescriptor sortTuple = context.getDescTbl().createTupleDescriptor();

    // Translate the partition-by columns (partition TopN) to executable exprs.
    if (CollectionUtils.isNotEmpty(partitionByColumns)) {
        for (ColumnRefOperator partitionByColumn : partitionByColumns) {
            Expr expr = ScalarOperatorToExpr.buildExecExpression(partitionByColumn,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));
            partitionExprs.add(expr);
        }
    }

    // One materialized slot per ordering column; later expressions refer to
    // these slots through the updated colRefToExpr mapping.
    for (Ordering ordering : orderSpec.getOrderDescs()) {
        Expr sortExpr = ScalarOperatorToExpr.buildExecExpression(ordering.getColumnRef(),
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));

        SlotDescriptor slotDesc =
                context.getDescTbl().addSlotDescriptor(sortTuple, new SlotId(ordering.getColumnRef().getId()));
        slotDesc.initFromExpr(sortExpr);
        slotDesc.setIsMaterialized(true);
        slotDesc.setIsNullable(sortExpr.isNullable());
        slotDesc.setType(sortExpr.getType());

        context.getColRefToExpr()
                .put(ordering.getColumnRef(), new SlotRef(ordering.getColumnRef().toString(), slotDesc));
        resolvedTupleExprs.add(sortExpr);
        sortExprs.add(new SlotRef(slotDesc));
    }

    ColumnRefSet columnRefSet = optExpr.inputAt(0).getLogicalProperty().getOutputColumns();
    for (int i = 0; i < columnRefSet.getColumnIds().length; ++i) {
        /*
         * Add column not be used in ordering
         */
        ColumnRefOperator columnRef = columnRefFactory.getColumnRef(columnRefSet.getColumnIds()[i]);
        if (orderSpec.getOrderDescs().stream().map(Ordering::getColumnRef)
                .noneMatch(c -> c.equals(columnRef))) {
            Expr outputExpr = ScalarOperatorToExpr.buildExecExpression(columnRef,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));

            SlotDescriptor slotDesc =
                    context.getDescTbl().addSlotDescriptor(sortTuple, new SlotId(columnRef.getId()));
            slotDesc.initFromExpr(outputExpr);
            slotDesc.setIsMaterialized(true);
            slotDesc.setIsNullable(outputExpr.isNullable());
            slotDesc.setType(outputExpr.getType());

            context.getColRefToExpr().put(columnRef, new SlotRef(columnRef.toString(), slotDesc));
            resolvedTupleExprs.add(outputExpr);
        }
    }

    sortTuple.computeMemLayout();
    SortInfo sortInfo = new SortInfo(partitionExprs, partitionLimit, sortExprs,
            orderSpec.getOrderDescs().stream().map(Ordering::isAscending).collect(Collectors.toList()),
            orderSpec.getOrderDescs().stream().map(Ordering::isNullsFirst).collect(Collectors.toList()));
    sortInfo.setMaterializedTupleInfo(sortTuple, resolvedTupleExprs);

    // NOTE(review): the two boolean ctor args are complementary flags derived
    // from whether a limit is present — check the SortNode ctor for their
    // exact meaning before changing them.
    SortNode sortNode = new SortNode(
            context.getNextNodeId(),
            inputFragment.getPlanRoot(),
            sortInfo,
            limit != Operator.DEFAULT_LIMIT,
            limit == Operator.DEFAULT_LIMIT,
            0);
    sortNode.setTopNType(topNType);
    sortNode.setLimit(limit);
    sortNode.setOffset(offset);
    sortNode.resolvedTupleExprs = resolvedTupleExprs;
    sortNode.setHasNullableGenerateChild();
    sortNode.computeStatistics(optExpr.getStatistics());
    if (shouldBuildGlobalRuntimeFilter()) {
        sortNode.buildRuntimeFilters(runtimeFilterIdIdGenerator, context.getDescTbl());
    }

    inputFragment.setPlanRoot(sortNode);
    return inputFragment;
}
/**
 * Marks the join for the "push down right table" optimization when the session
 * enables it and the join type supports it (inner / left-semi / right join).
 * <p>
 * FIX: guards against a null {@code ConnectContext} (background tasks may have
 * none), matching the null check already performed in
 * {@code shouldBuildGlobalRuntimeFilter}; previously a null context caused a
 * NullPointerException here. With a null context push-down is disabled.
 */
private void setJoinPushDown(JoinNode node) {
    ConnectContext connectContext = ConnectContext.get();
    boolean sessionEnabled = connectContext != null
            && connectContext.getSessionVariable().isHashJoinPushDownRightTable();
    node.setIsPushDown(sessionEnabled
            && (node.getJoinOp().isInnerJoin() || node.getJoinOp().isLeftSemiJoin() ||
            node.getJoinOp().isRightJoin()));
}
/**
 * Global runtime filters are built when a session exists and either the
 * explicit session flag or the pipeline engine is enabled.
 */
private boolean shouldBuildGlobalRuntimeFilter() {
    ConnectContext connectContext = ConnectContext.get();
    if (connectContext == null) {
        return false;
    }
    return connectContext.getSessionVariable().getEnableGlobalRuntimeFilter()
            || connectContext.getSessionVariable().isEnablePipelineEngine();
}
@Override
public PlanFragment visitPhysicalHashJoin(OptExpression optExpr, ExecPlan context) {
    // Lower both children, then build the join on top via the shared helper.
    PlanFragment probeSide = visit(optExpr.inputAt(0), context);
    PlanFragment buildSide = visit(optExpr.inputAt(1), context);
    return visitPhysicalJoin(probeSide, buildSide, optExpr, context);
}
/**
 * Splits a predicate into its AND-ed conjuncts and translates each one into an
 * executable {@code Expr}.
 */
private List<Expr> extractConjuncts(ScalarOperator predicate, ExecPlan context) {
    List<Expr> translated = Lists.newArrayList();
    for (ScalarOperator conjunct : Utils.extractConjuncts(predicate)) {
        translated.add(ScalarOperatorToExpr.buildExecExpression(conjunct,
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())));
    }
    return translated;
}
/**
 * Marks every slot of the tuples that can become NULL under this join as
 * nullable: tuples already nullable on either side, plus the non-preserved
 * side(s) of an outer join.
 */
private void setNullableForJoin(JoinOperator joinOperator,
                                PlanFragment leftFragment, PlanFragment rightFragment, ExecPlan context) {
    PlanNode leftRoot = leftFragment.getPlanRoot();
    PlanNode rightRoot = rightFragment.getPlanRoot();
    Set<TupleId> nullableTupleIds = new HashSet<>(leftRoot.getNullableTupleIds());
    nullableTupleIds.addAll(rightRoot.getNullableTupleIds());
    // LEFT OUTER makes the right side nullable, RIGHT OUTER the left side,
    // FULL OUTER both.
    if (joinOperator.isLeftOuterJoin() || joinOperator.isFullOuterJoin()) {
        nullableTupleIds.addAll(rightRoot.getTupleIds());
    }
    if (joinOperator.isRightOuterJoin() || joinOperator.isFullOuterJoin()) {
        nullableTupleIds.addAll(leftRoot.getTupleIds());
    }
    for (TupleId nullableId : nullableTupleIds) {
        TupleDescriptor tupleDescriptor = context.getDescTbl().getTupleDesc(nullableId);
        for (SlotDescriptor slot : tupleDescriptor.getSlots()) {
            slot.setIsNullable(true);
        }
        tupleDescriptor.computeMemLayout();
    }
}
@Override
public PlanFragment visitPhysicalNestLoopJoin(OptExpression optExpr, ExecPlan context) {
    PhysicalJoinOperator node = (PhysicalJoinOperator) optExpr.getOp();
    PlanFragment leftFragment = visit(optExpr.inputAt(0), context);
    PlanFragment rightFragment = visit(optExpr.inputAt(1), context);

    // Filter predicate and ON-clause predicate are translated separately.
    List<Expr> conjuncts = extractConjuncts(node.getPredicate(), context);
    List<Expr> joinOnConjuncts = extractConjuncts(node.getOnPredicate(), context);
    List<Expr> probePartitionByExprs = Lists.newArrayList();
    DistributionSpec leftDistributionSpec =
            optExpr.getRequiredProperties().get(0).getDistributionProperty().getSpec();
    DistributionSpec rightDistributionSpec =
            optExpr.getRequiredProperties().get(1).getDistributionProperty().getSpec();
    // Probe-side partition exprs are only relevant when both sides are hash-distributed.
    if (leftDistributionSpec instanceof HashDistributionSpec &&
            rightDistributionSpec instanceof HashDistributionSpec) {
        probePartitionByExprs = getShuffleExprs((HashDistributionSpec) leftDistributionSpec, context);
    }

    setNullableForJoin(node.getJoinType(), leftFragment, rightFragment, context);

    NestLoopJoinNode joinNode = new NestLoopJoinNode(context.getNextNodeId(),
            leftFragment.getPlanRoot(), rightFragment.getPlanRoot(),
            null, node.getJoinType(), Lists.newArrayList(), joinOnConjuncts);

    joinNode.setLimit(node.getLimit());
    joinNode.computeStatistics(optExpr.getStatistics());
    joinNode.addConjuncts(conjuncts);
    joinNode.setProbePartitionByExprs(probePartitionByExprs);

    // Merge the right fragment into the left: the left fragment becomes the
    // join fragment and is removed/re-added to move it to the end of the list.
    rightFragment.getPlanRoot().setFragment(leftFragment);
    context.getFragments().remove(rightFragment);
    context.getFragments().remove(leftFragment);
    context.getFragments().add(leftFragment);

    leftFragment.setPlanRoot(joinNode);
    leftFragment.addChildren(rightFragment.getChildren());

    // NOTE(review): a non-exchange right child is flagged as replicated —
    // confirm the exact semantics in NestLoopJoinNode.
    if (!(joinNode.getChild(1) instanceof ExchangeNode)) {
        joinNode.setReplicated(true);
    }
    if (shouldBuildGlobalRuntimeFilter()) {
        joinNode.buildRuntimeFilters(runtimeFilterIdIdGenerator, context.getDescTbl());
    }
    leftFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
    return leftFragment;
}
@Override
public PlanFragment visitPhysicalMergeJoin(OptExpression optExpr, ExecPlan context) {
    PlanFragment leftFragment = visit(optExpr.inputAt(0), context);
    PlanFragment rightFragment = visit(optExpr.inputAt(1), context);
    PlanNode leftPlanRoot = leftFragment.getPlanRoot();
    PlanNode rightPlanRoot = rightFragment.getPlanRoot();

    OptExpression leftExpression = optExpr.inputAt(0);
    OptExpression rightExpression = optExpr.inputAt(1);

    // When both children have inputs of their own, temporarily strip the
    // children's roots so the common join builder works on their inputs.
    // NOTE(review): presumably the stripped roots are the sort nodes feeding
    // the merge join — confirm against the merge-join rule that builds them.
    boolean needDealSort = leftExpression.getInputs().size() > 0 && rightExpression.getInputs().size() > 0;
    if (needDealSort) {
        optExpr.setChild(0, leftExpression.inputAt(0));
        optExpr.setChild(1, rightExpression.inputAt(0));
        leftFragment.setPlanRoot(leftPlanRoot.getChild(0));
        rightFragment.setPlanRoot(rightPlanRoot.getChild(0));
    }

    PlanFragment planFragment = visitPhysicalJoin(leftFragment, rightFragment, optExpr, context);

    // Undo the temporary rewiring: restore the original optimizer children and
    // re-attach the saved plan roots as the join node's children.
    if (needDealSort) {
        leftExpression.setChild(0, optExpr.inputAt(0));
        rightExpression.setChild(0, optExpr.inputAt(1));
        optExpr.setChild(0, leftExpression);
        optExpr.setChild(1, rightExpression);
        planFragment.getPlanRoot().setChild(0, leftPlanRoot);
        planFragment.getPlanRoot().setChild(1, rightPlanRoot);
    }
    return planFragment;
}
/**
 * Resolves each shuffle-distribution column id back to its ColumnRefOperator.
 * The spec must contain at least one shuffle column.
 */
private List<ColumnRefOperator> getShuffleColumns(HashDistributionSpec spec) {
    List<DistributionCol> columns = spec.getShuffleColumns();
    Preconditions.checkState(!columns.isEmpty());
    return columns.stream()
            .map(column -> columnRefFactory.getColumnRef(column.getColId()))
            .collect(Collectors.toList());
}
/**
 * Translates the spec's shuffle columns into executable partition expressions.
 */
private List<Expr> getShuffleExprs(HashDistributionSpec hashDistributionSpec, ExecPlan context) {
    List<Expr> shuffleExprs = Lists.newArrayList();
    for (ColumnRefOperator shuffleColumn : getShuffleColumns(hashDistributionSpec)) {
        shuffleExprs.add(ScalarOperatorToExpr.buildExecExpression(shuffleColumn,
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())));
    }
    return shuffleExprs;
}
/**
 * Shared builder for hash and merge joins: infers the distribution mode,
 * translates the join predicates, creates the join node and delegates fragment
 * assembly to buildJoinFragment. Cross joins are handled elsewhere.
 */
private PlanFragment visitPhysicalJoin(PlanFragment leftFragment, PlanFragment rightFragment,
                                       OptExpression optExpr, ExecPlan context) {
    PhysicalJoinOperator node = (PhysicalJoinOperator) optExpr.getOp();
    JoinOperator joinOperator = node.getJoinType();
    Preconditions.checkState(!joinOperator.isCrossJoin(), "should not be cross join");

    // Look through DecodeNodes when inferring the distribution mode.
    PlanNode leftFragmentPlanRoot = leftFragment.getPlanRoot();
    PlanNode rightFragmentPlanRoot = rightFragment.getPlanRoot();
    if (leftFragmentPlanRoot instanceof DecodeNode) {
        leftFragmentPlanRoot = leftFragmentPlanRoot.getChild(0);
    }
    if (rightFragmentPlanRoot instanceof DecodeNode) {
        rightFragmentPlanRoot = rightFragmentPlanRoot.getChild(0);
    }

    // Probe-side partition exprs are only built when both required child
    // properties are hash distributions.
    List<Expr> probePartitionByExprs = Lists.newArrayList();
    DistributionSpec leftDistributionSpec =
            optExpr.getRequiredProperties().get(0).getDistributionProperty().getSpec();
    DistributionSpec rightDistributionSpec =
            optExpr.getRequiredProperties().get(1).getDistributionProperty().getSpec();
    if (leftDistributionSpec instanceof HashDistributionSpec &&
            rightDistributionSpec instanceof HashDistributionSpec) {
        probePartitionByExprs = getShuffleExprs((HashDistributionSpec) leftDistributionSpec, context);
    }

    JoinNode.DistributionMode distributionMode =
            inferDistributionMode(optExpr, leftFragmentPlanRoot, rightFragmentPlanRoot);
    JoinExprInfo joinExpr = buildJoinExpr(optExpr, context);
    List<Expr> eqJoinConjuncts = joinExpr.eqJoinConjuncts;
    List<Expr> otherJoinConjuncts = joinExpr.otherJoin;
    List<Expr> conjuncts = joinExpr.conjuncts;

    setNullableForJoin(joinOperator, leftFragment, rightFragment, context);

    // Node type follows the physical operator kind.
    JoinNode joinNode;
    if (node instanceof PhysicalHashJoinOperator) {
        joinNode = new HashJoinNode(
                context.getNextNodeId(),
                leftFragment.getPlanRoot(), rightFragment.getPlanRoot(),
                joinOperator, eqJoinConjuncts, otherJoinConjuncts);
    } else if (node instanceof PhysicalMergeJoinOperator) {
        joinNode = new MergeJoinNode(
                context.getNextNodeId(),
                leftFragment.getPlanRoot(), rightFragment.getPlanRoot(),
                joinOperator, eqJoinConjuncts, otherJoinConjuncts);
    } else {
        throw new StarRocksPlannerException("unknown join operator: " + node, INTERNAL_ERROR);
    }

    fillSlotsInfo(node.getProjection(), joinNode, optExpr, joinExpr.requiredColsForFilter);

    joinNode.setDistributionMode(distributionMode);
    joinNode.getConjuncts().addAll(conjuncts);
    joinNode.setLimit(node.getLimit());
    joinNode.computeStatistics(optExpr.getStatistics());
    joinNode.setProbePartitionByExprs(probePartitionByExprs);

    if (shouldBuildGlobalRuntimeFilter()) {
        joinNode.buildRuntimeFilters(runtimeFilterIdIdGenerator, context.getDescTbl());
    }

    return buildJoinFragment(context, leftFragment, rightFragment, distributionMode, joinNode);
}
/**
 * Returns true only for exchange nodes whose distribution type matches the
 * expected one (null-safe comparison).
 */
private boolean isExchangeWithDistributionType(PlanNode node, DistributionSpec.DistributionType expectedType) {
    return node instanceof ExchangeNode
            && Objects.equals(((ExchangeNode) node).getDistributionType(), expectedType);
}
/**
 * A colocate join requires every child's required property to be a shuffle
 * whose hash distribution originates from LOCAL (bucketed) data.
 */
private boolean isColocateJoin(OptExpression optExpression) {
    // allMatch(p) expressed as noneMatch(!p); vacuously true for no properties.
    return optExpression.getRequiredProperties().stream().noneMatch(
            property -> {
                if (!property.getDistributionProperty().isShuffle()) {
                    return true;
                }
                HashDistributionDesc.SourceType sourceType =
                        ((HashDistributionSpec) (property.getDistributionProperty().getSpec()))
                                .getHashDistributionDesc().getSourceType();
                return !sourceType.equals(HashDistributionDesc.SourceType.LOCAL);
            });
}
/**
 * A shuffle join requires every child's required property to be a shuffle
 * whose hash distribution comes from a shuffle source (join, enforce, or agg).
 */
public boolean isShuffleJoin(OptExpression optExpression) {
    return optExpression.getRequiredProperties().stream().allMatch(
            property -> {
                if (!property.getDistributionProperty().isShuffle()) {
                    return false;
                }
                HashDistributionDesc.SourceType sourceType =
                        ((HashDistributionSpec) (property.getDistributionProperty().getSpec()))
                                .getHashDistributionDesc().getSourceType();
                return sourceType.equals(HashDistributionDesc.SourceType.SHUFFLE_JOIN)
                        || sourceType.equals(HashDistributionDesc.SourceType.SHUFFLE_ENFORCE)
                        || sourceType.equals(HashDistributionDesc.SourceType.SHUFFLE_AGG);
            });
}
/**
 * Merges the shuffled (removed) side into the staying fragment for a
 * bucket-shuffle join: the removed fragment's child is re-partitioned with
 * BUCKET_SHUFFLE_HASH_PARTITIONED and the join node becomes the staying
 * fragment's new root.
 */
public PlanFragment computeBucketShufflePlanFragment(ExecPlan context,
                                                     PlanFragment stayFragment,
                                                     PlanFragment removeFragment, JoinNode hashJoinNode) {
    hashJoinNode.setLocalHashBucket(true);
    hashJoinNode.setPartitionExprs(removeFragment.getDataPartition().getPartitionExprs());
    removeFragment.getChild(0)
            .setOutputPartition(new DataPartition(TPartitionType.BUCKET_SHUFFLE_HASH_PARTITIONED,
                    removeFragment.getDataPartition().getPartitionExprs()));

    // Remove both fragments, then re-add the staying one (this moves it to the
    // end of the fragment list).
    context.getFragments().remove(removeFragment);
    context.getFragments().remove(stayFragment);
    context.getFragments().add(stayFragment);

    stayFragment.setPlanRoot(hashJoinNode);
    stayFragment.addChildren(removeFragment.getChildren());
    stayFragment.mergeQueryGlobalDicts(removeFragment.getQueryGlobalDicts());
    return stayFragment;
}
/**
 * Merges the shuffled (removed) side into the staying fragment for a shuffle
 * hash join: the removed fragment's child is re-partitioned with
 * HASH_PARTITIONED and the join node becomes the staying fragment's new root.
 */
public PlanFragment computeShuffleHashBucketPlanFragment(ExecPlan context,
                                                         PlanFragment stayFragment,
                                                         PlanFragment removeFragment,
                                                         JoinNode hashJoinNode) {
    hashJoinNode.setPartitionExprs(removeFragment.getDataPartition().getPartitionExprs());
    DataPartition dataPartition = new DataPartition(TPartitionType.HASH_PARTITIONED,
            removeFragment.getDataPartition().getPartitionExprs());
    removeFragment.getChild(0).setOutputPartition(dataPartition);

    // Remove both fragments, then re-add the staying one (this moves it to the
    // end of the fragment list).
    context.getFragments().remove(removeFragment);
    context.getFragments().remove(stayFragment);
    context.getFragments().add(stayFragment);

    stayFragment.setPlanRoot(hashJoinNode);
    stayFragment.addChildren(removeFragment.getChildren());
    stayFragment.mergeQueryGlobalDicts(removeFragment.getQueryGlobalDicts());
    return stayFragment;
}
@Override
public PlanFragment visitPhysicalAssertOneRow(OptExpression optExpression, ExecPlan context) {
    PlanFragment inputFragment = visit(optExpression.inputAt(0), context);

    // Every slot produced by the child is marked nullable before the
    // assert node is stacked on top.
    for (TupleId tupleId : inputFragment.getPlanRoot().getTupleIds()) {
        for (SlotDescriptor slot : context.getDescTbl().getTupleDesc(tupleId).getSlots()) {
            slot.setIsNullable(true);
        }
    }

    PhysicalAssertOneRowOperator assertOp = (PhysicalAssertOneRowOperator) optExpression.getOp();
    AssertNumRowsElement assertElement =
            new AssertNumRowsElement(assertOp.getCheckRows(), assertOp.getTips(), assertOp.getAssertion());
    AssertNumRowsNode assertNode =
            new AssertNumRowsNode(context.getNextNodeId(), inputFragment.getPlanRoot(), assertElement);
    assertNode.computeStatistics(optExpression.getStatistics());
    inputFragment.setPlanRoot(assertNode);
    return inputFragment;
}
@Override
public PlanFragment visitPhysicalAnalytic(OptExpression optExpr, ExecPlan context) {
    PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
    PhysicalWindowOperator node = (PhysicalWindowOperator) optExpr.getOp();

    List<Expr> analyticFnCalls = new ArrayList<>();
    // Output tuple: one materialized slot per analytic (window) function call.
    TupleDescriptor outputTupleDesc = context.getDescTbl().createTupleDescriptor();
    for (Map.Entry<ColumnRefOperator, CallOperator> analyticCall : node.getAnalyticCall().entrySet()) {
        Expr analyticFunction = ScalarOperatorToExpr.buildExecExpression(analyticCall.getValue(),
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));
        analyticFnCalls.add(analyticFunction);

        SlotDescriptor slotDesc = context.getDescTbl()
                .addSlotDescriptor(outputTupleDesc, new SlotId(analyticCall.getKey().getId()));
        slotDesc.setType(analyticFunction.getType());
        slotDesc.setIsNullable(analyticFunction.isNullable());
        slotDesc.setIsMaterialized(true);
        context.getColRefToExpr()
                .put(analyticCall.getKey(), new SlotRef(analyticCall.getKey().toString(), slotDesc));
    }

    // Translate the window's PARTITION BY and ORDER BY expressions.
    List<Expr> partitionExprs =
            node.getPartitionExpressions().stream().map(e -> ScalarOperatorToExpr.buildExecExpression(e,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
                    .collect(Collectors.toList());

    List<OrderByElement> orderByElements = node.getOrderByElements().stream().map(e -> new OrderByElement(
            ScalarOperatorToExpr.buildExecExpression(e.getColumnRef(),
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())),
            e.isAscending(), e.isNullsFirst())).collect(Collectors.toList());

    AnalyticEvalNode analyticEvalNode = new AnalyticEvalNode(
            context.getNextNodeId(),
            inputFragment.getPlanRoot(),
            analyticFnCalls,
            partitionExprs,
            orderByElements,
            node.getAnalyticWindow(),
            node.isUseHashBasedPartition(),
            null, outputTupleDesc, null, null,
            context.getDescTbl().createTupleDescriptor());
    analyticEvalNode.setSubstitutedPartitionExprs(partitionExprs);
    analyticEvalNode.setLimit(node.getLimit());
    analyticEvalNode.setHasNullableGenerateChild();
    analyticEvalNode.computeStatistics(optExpr.getStatistics());
    // Colocate execution is enabled when the fragment has a colocated OLAP
    // scan child.
    if (hasColocateOlapScanChildInFragment(analyticEvalNode)) {
        analyticEvalNode.setColocate(true);
    }

    // Remaining predicate conjuncts are evaluated on the analytic node itself.
    List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
    ScalarOperatorToExpr.FormatterContext formatterContext =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
    for (ScalarOperator predicate : predicates) {
        analyticEvalNode.getConjuncts()
                .add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
    }

    // NOTE(review): a SortNode child is informed of the analytic partition
    // exprs — presumably so the sort can be partition-aware; confirm in SortNode.
    PlanNode root = inputFragment.getPlanRoot();
    if (root instanceof SortNode) {
        SortNode sortNode = (SortNode) root;
        sortNode.setAnalyticPartitionExprs(analyticEvalNode.getPartitionExprs());
    }

    inputFragment.setPlanRoot(analyticEvalNode);
    return inputFragment;
}
/**
 * Builds a set-operation (UNION / EXCEPT / INTERSECT) fragment: creates the
 * output tuple, one exchange per child fragment, and wires every child's
 * materialized output expressions into the set-operation node.
 *
 * @param optExpr      the physical set-operation expression
 * @param context      the execution-plan build context
 * @param operatorType one of PHYSICAL_UNION / PHYSICAL_EXCEPT / PHYSICAL_INTERSECT
 * @return the new fragment rooted at the set-operation node
 * @throws StarRocksPlannerException if operatorType is not a set operation
 */
private PlanFragment buildSetOperation(OptExpression optExpr, ExecPlan context, OperatorType operatorType) {
    PhysicalSetOperation setOperation = (PhysicalSetOperation) optExpr.getOp();
    TupleDescriptor setOperationTuple = context.getDescTbl().createTupleDescriptor();

    // One materialized slot per output column of the set operation.
    for (ColumnRefOperator columnRefOperator : setOperation.getOutputColumnRefOp()) {
        SlotDescriptor slotDesc = context.getDescTbl()
                .addSlotDescriptor(setOperationTuple, new SlotId(columnRefOperator.getId()));
        slotDesc.setType(columnRefOperator.getType());
        slotDesc.setIsMaterialized(true);
        slotDesc.setIsNullable(columnRefOperator.isNullable());
        context.getColRefToExpr().put(columnRefOperator, new SlotRef(columnRefOperator.toString(), slotDesc));
    }

    SetOperationNode setOperationNode;
    boolean isUnion = false;
    if (operatorType.equals(OperatorType.PHYSICAL_UNION)) {
        isUnion = true;
        setOperationNode = new UnionNode(context.getNextNodeId(), setOperationTuple.getId());
        setOperationNode.setFirstMaterializedChildIdx_(optExpr.arity());
    } else if (operatorType.equals(OperatorType.PHYSICAL_EXCEPT)) {
        setOperationNode = new ExceptNode(context.getNextNodeId(), setOperationTuple.getId());
    } else if (operatorType.equals(OperatorType.PHYSICAL_INTERSECT)) {
        setOperationNode = new IntersectNode(context.getNextNodeId(), setOperationTuple.getId());
    } else {
        throw new StarRocksPlannerException("Unsupported set operation", INTERNAL_ERROR);
    }

    // For each child, map every output slot id to the corresponding child slot id.
    List<Map<Integer, Integer>> outputSlotIdToChildSlotIdMaps = new ArrayList<>();
    for (int childIdx = 0; childIdx < optExpr.arity(); ++childIdx) {
        Map<Integer, Integer> slotIdMap = new HashMap<>();
        List<ColumnRefOperator> childOutput = setOperation.getChildOutputColumns().get(childIdx);
        Preconditions.checkState(childOutput.size() == setOperation.getOutputColumnRefOp().size());
        for (int columnIdx = 0; columnIdx < setOperation.getOutputColumnRefOp().size(); ++columnIdx) {
            Integer resultColumnIdx = setOperation.getOutputColumnRefOp().get(columnIdx).getId();
            slotIdMap.put(resultColumnIdx, childOutput.get(columnIdx).getId());
        }
        outputSlotIdToChildSlotIdMaps.add(slotIdMap);
        Preconditions.checkState(slotIdMap.size() == setOperation.getOutputColumnRefOp().size());
    }
    setOperationNode.setOutputSlotIdToChildSlotIdMaps(outputSlotIdToChildSlotIdMaps);
    Preconditions.checkState(optExpr.getInputs().size() == setOperation.getChildOutputColumns().size());

    PlanFragment setOperationFragment =
            new PlanFragment(context.getNextFragmentId(), setOperationNode, DataPartition.RANDOM);
    List<List<Expr>> materializedResultExprLists = Lists.newArrayList();
    ScalarOperatorToExpr.FormatterContext formatterContext =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
    for (int i = 0; i < optExpr.getInputs().size(); i++) {
        List<ColumnRefOperator> childOutput = setOperation.getChildOutputColumns().get(i);
        PlanFragment fragment = visit(optExpr.getInputs().get(i), context);

        List<Expr> materializedExpressions = Lists.newArrayList();
        for (ColumnRefOperator ref : childOutput) {
            materializedExpressions.add(ScalarOperatorToExpr.buildExecExpression(ref, formatterContext));
        }
        materializedResultExprLists.add(materializedExpressions);

        // UNION children stay randomly partitioned; EXCEPT/INTERSECT children
        // are hash-partitioned on the materialized output expressions.
        if (isUnion) {
            fragment.setOutputPartition(DataPartition.RANDOM);
        } else {
            fragment.setOutputPartition(DataPartition.hashPartitioned(materializedExpressions));
        }

        ExchangeNode exchangeNode =
                new ExchangeNode(context.getNextNodeId(), fragment.getPlanRoot(), fragment.getDataPartition());
        exchangeNode.setFragment(setOperationFragment);
        fragment.setDestination(exchangeNode);
        setOperationNode.addChild(exchangeNode);
    }

    setOperationNode.setHasNullableGenerateChild();
    List<Expr> setOutputList = Lists.newArrayList();
    for (ColumnRefOperator columnRefOperator : setOperation.getOutputColumnRefOp()) {
        SlotDescriptor slotDesc = context.getDescTbl().getSlotDesc(new SlotId(columnRefOperator.getId()));
        // FIX: use logical short-circuit '||' instead of bitwise '|' on boolean
        // operands — same result here (both operands are plain getters), but
        // idiomatic and what was clearly intended.
        slotDesc.setIsNullable(slotDesc.getIsNullable() || setOperationNode.isHasNullableGenerateChild());
        setOutputList.add(new SlotRef(String.valueOf(columnRefOperator.getId()), slotDesc));
    }
    setOperationTuple.computeMemLayout();
    setOperationNode.setSetOperationOutputList(setOutputList);
    setOperationNode.setMaterializedResultExprLists_(materializedResultExprLists);
    setOperationNode.setLimit(setOperation.getLimit());
    setOperationNode.computeStatistics(optExpr.getStatistics());
    context.getFragments().add(setOperationFragment);
    return setOperationFragment;
}
@Override
public PlanFragment visitPhysicalUnion(OptExpression optExpr, ExecPlan context) {
    // Delegates to the shared set-operation builder with the UNION operator type.
    return buildSetOperation(optExpr, context, OperatorType.PHYSICAL_UNION);
}
@Override
public PlanFragment visitPhysicalExcept(OptExpression optExpr, ExecPlan context) {
    // Delegates to the shared set-operation builder with the EXCEPT operator type.
    return buildSetOperation(optExpr, context, OperatorType.PHYSICAL_EXCEPT);
}
@Override
public PlanFragment visitPhysicalIntersect(OptExpression optExpr, ExecPlan context) {
    // Delegates to the shared set-operation builder with the INTERSECT operator type.
    return buildSetOperation(optExpr, context, OperatorType.PHYSICAL_INTERSECT);
}
@Override
public PlanFragment visitPhysicalRepeat(OptExpression optExpr, ExecPlan context) {
    PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
    PhysicalRepeatOperator repeatOperator = (PhysicalRepeatOperator) optExpr.getOp();

    // Materialize one slot per output grouping column in a fresh tuple.
    TupleDescriptor groupingTuple = context.getDescTbl().createTupleDescriptor();
    for (ColumnRefOperator groupingRef : repeatOperator.getOutputGrouping()) {
        SlotDescriptor slot = context.getDescTbl()
                .addSlotDescriptor(groupingTuple, new SlotId(groupingRef.getId()));
        slot.setType(groupingRef.getType());
        slot.setIsMaterialized(true);
        slot.setIsNullable(groupingRef.isNullable());
        context.getColRefToExpr().put(groupingRef, new SlotRef(groupingRef.toString(), slot));
    }
    groupingTuple.computeMemLayout();

    // Each repeat line is described by the set of column ids it keeps.
    List<Set<Integer>> repeatSlotIdList = repeatOperator.getRepeatColumnRef().stream()
            .map(refs -> refs.stream().map(ColumnRefOperator::getId).collect(Collectors.toSet()))
            .collect(Collectors.toList());

    RepeatNode repeatNode = new RepeatNode(
            context.getNextNodeId(),
            inputFragment.getPlanRoot(),
            groupingTuple,
            repeatSlotIdList,
            repeatOperator.getGroupingIds());

    // Attach the operator's predicate conjuncts to the repeat node.
    ScalarOperatorToExpr.FormatterContext formatterContext =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
    for (ScalarOperator predicate : Utils.extractConjuncts(repeatOperator.getPredicate())) {
        repeatNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
    }
    repeatNode.computeStatistics(optExpr.getStatistics());

    inputFragment.setPlanRoot(repeatNode);
    return inputFragment;
}
@Override
public PlanFragment visitPhysicalFilter(OptExpression optExpr, ExecPlan context) {
    PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
    PhysicalFilterOperator filter = (PhysicalFilterOperator) optExpr.getOp();

    // Translate each AND-ed conjunct of the filter predicate to an executable Expr.
    List<Expr> predicates = Lists.newArrayList();
    for (ScalarOperator conjunct : Utils.extractConjuncts(filter.getPredicate())) {
        predicates.add(ScalarOperatorToExpr.buildExecExpression(conjunct,
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())));
    }

    SelectNode selectNode = new SelectNode(context.getNextNodeId(), inputFragment.getPlanRoot(), predicates);
    selectNode.setLimit(filter.getLimit());
    selectNode.computeStatistics(optExpr.getStatistics());
    inputFragment.setPlanRoot(selectNode);
    return inputFragment;
}
@Override
public PlanFragment visitPhysicalTableFunction(OptExpression optExpression, ExecPlan context) {
    PlanFragment inputFragment = visit(optExpression.inputAt(0), context);
    PhysicalTableFunctionOperator tableFn = (PhysicalTableFunctionOperator) optExpression.getOp();

    // One materialized slot per output column of the table function.
    TupleDescriptor outputTuple = context.getDescTbl().createTupleDescriptor();
    for (ColumnRefOperator outputRef : tableFn.getOutputColRefs()) {
        SlotDescriptor slot =
                context.getDescTbl().addSlotDescriptor(outputTuple, new SlotId(outputRef.getId()));
        slot.setType(outputRef.getType());
        slot.setIsMaterialized(true);
        slot.setIsNullable(outputRef.isNullable());
        context.getColRefToExpr().put(outputRef, new SlotRef(outputRef.toString(), slot));
    }
    outputTuple.computeMemLayout();

    // The node identifies columns by id: function parameters, outer columns,
    // and the function's own result columns.
    List<Integer> paramColumnIds = tableFn.getFnParamColumnRefs().stream()
            .map(ColumnRefOperator::getId).collect(Collectors.toList());
    List<Integer> outerColumnIds = tableFn.getOuterColRefs().stream()
            .map(ColumnRefOperator::getId).collect(Collectors.toList());
    List<Integer> fnResultColumnIds = tableFn.getFnResultColRefs().stream()
            .map(ColumnRefOperator::getId).collect(Collectors.toList());

    TableFunctionNode tableFunctionNode = new TableFunctionNode(context.getNextNodeId(),
            inputFragment.getPlanRoot(),
            outputTuple,
            tableFn.getFn(),
            paramColumnIds,
            outerColumnIds,
            fnResultColumnIds);
    tableFunctionNode.computeStatistics(optExpression.getStatistics());
    tableFunctionNode.setLimit(tableFn.getLimit());

    inputFragment.setPlanRoot(tableFunctionNode);
    return inputFragment;
}
@Override
public PlanFragment visitPhysicalLimit(OptExpression optExpression, ExecPlan context) {
    // No dedicated plan node is created for a physical limit; the child's
    // fragment is returned as-is. NOTE(review): presumably the limit value has
    // already been pushed into the child operator — confirm.
    return visit(optExpression.inputAt(0), context);
}
@Override
public PlanFragment visitPhysicalCTEConsume(OptExpression optExpression, ExecPlan context) {
    PhysicalCTEConsumeOperator consume = (PhysicalCTEConsumeOperator) optExpression.getOp();
    int cteId = consume.getCteId();

    // The producing multi-cast fragment was registered under this cteId by
    // visitPhysicalCTEProduce.
    MultiCastPlanFragment cteFragment = (MultiCastPlanFragment) context.getCteProduceFragments().get(cteId);
    ExchangeNode exchangeNode = new ExchangeNode(context.getNextNodeId(),
            cteFragment.getPlanRoot(), DistributionSpec.DistributionType.SHUFFLE);

    exchangeNode.setReceiveColumns(consume.getCteOutputColumnRefMap().values().stream()
            .map(ColumnRefOperator::getId).collect(Collectors.toList()));
    exchangeNode.setDataPartition(cteFragment.getDataPartition());
    exchangeNode.setNumInstances(cteFragment.getPlanRoot().getNumInstances());

    PlanFragment consumeFragment = new PlanFragment(context.getNextFragmentId(), exchangeNode,
            cteFragment.getDataPartition());

    // Project the producer's columns onto this consumer's column refs and
    // carry over the producer's dictionaries.
    Map<ColumnRefOperator, ScalarOperator> projectMap = Maps.newHashMap();
    projectMap.putAll(consume.getCteOutputColumnRefMap());
    consumeFragment = buildProjectNode(optExpression, new Projection(projectMap), consumeFragment, context);
    consumeFragment.setQueryGlobalDicts(cteFragment.getQueryGlobalDicts());
    consumeFragment.setLoadGlobalDicts(cteFragment.getLoadGlobalDicts());

    // A consume-side predicate is applied on top of the projection.
    if (consume.getPredicate() != null) {
        List<Expr> predicates = Utils.extractConjuncts(consume.getPredicate()).stream()
                .map(d -> ScalarOperatorToExpr.buildExecExpression(d,
                        new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
                .collect(Collectors.toList());
        SelectNode selectNode =
                new SelectNode(context.getNextNodeId(), consumeFragment.getPlanRoot(), predicates);
        selectNode.computeStatistics(optExpression.getStatistics());
        consumeFragment.setPlanRoot(selectNode);
    }

    if (consume.hasLimit()) {
        consumeFragment.getPlanRoot().setLimit(consume.getLimit());
    }

    // Register this consumer as one of the producer's destinations.
    cteFragment.getDestNodeList().add(exchangeNode);
    consumeFragment.addChild(cteFragment);
    context.getFragments().add(consumeFragment);
    return consumeFragment;
}
@Override
public PlanFragment visitPhysicalCTEProduce(OptExpression optExpression, ExecPlan context) {
    PlanFragment childFragment = visit(optExpression.inputAt(0), context);
    int cteId = ((PhysicalCTEProduceOperator) optExpression.getOp()).getCteId();

    // Wrap the child fragment in a multi-cast fragment so several consumers
    // can read the produced rows; the wrapper replaces it in the fragment list.
    context.getFragments().remove(childFragment);
    MultiCastPlanFragment produceFragment = new MultiCastPlanFragment(childFragment);

    List<Expr> outputExprs = Lists.newArrayList();
    optExpression.getOutputColumns().getStream().forEach(
            columnId -> outputExprs.add(context.getColRefToExpr().get(columnRefFactory.getColumnRef(columnId))));
    produceFragment.setOutputExprs(outputExprs);

    context.getCteProduceFragments().put(cteId, produceFragment);
    context.getFragments().add(produceFragment);
    return childFragment;
}
@Override
public PlanFragment visitPhysicalCTEAnchor(OptExpression optExpression, ExecPlan context) {
    // Lower the CTE produce side (input 0) first for its side effects on the
    // context, then return the plan of the anchor's main query (input 1).
    visit(optExpression.inputAt(0), context);
    return visit(optExpression.inputAt(1), context);
}
@Override
public PlanFragment visitPhysicalNoCTE(OptExpression optExpression, ExecPlan context) {
    // A no-CTE marker is transparent: lower and return its only child.
    return visit(optExpression.inputAt(0), context);
}
/**
 * Immutable bundle of the translated join expressions:
 * equality conjuncts, the remaining ON-clause conjuncts, the operator's own
 * filter conjuncts, and the columns required to evaluate the filters
 * (passed to fillSlotsInfo by the join builder).
 */
static class JoinExprInfo {
    public final List<Expr> eqJoinConjuncts;
    public final List<Expr> otherJoin;
    public final List<Expr> conjuncts;
    public final ColumnRefSet requiredColsForFilter;

    public JoinExprInfo(List<Expr> eqJoinConjuncts, List<Expr> otherJoin, List<Expr> conjuncts,
                        ColumnRefSet requiredColsForFilter) {
        this.eqJoinConjuncts = eqJoinConjuncts;
        this.otherJoin = otherJoin;
        this.conjuncts = conjuncts;
        this.requiredColsForFilter = requiredColsForFilter;
    }
}
/**
 * Translates a join operator's ON clause and predicate into executable exprs.
 * Splits the ON conjuncts into equality conjuncts (required, normalized so the
 * left operand refers to the left child) and other conjuncts, and collects the
 * columns the filters need.
 */
private JoinExprInfo buildJoinExpr(OptExpression optExpr, ExecPlan context) {
    ScalarOperator predicate = optExpr.getOp().getPredicate();
    ScalarOperator onPredicate;
    if (optExpr.getOp() instanceof PhysicalJoinOperator) {
        onPredicate = ((PhysicalJoinOperator) optExpr.getOp()).getOnPredicate();
    } else if (optExpr.getOp() instanceof PhysicalStreamJoinOperator) {
        onPredicate = ((PhysicalStreamJoinOperator) optExpr.getOp()).getOnPredicate();
    } else {
        throw new IllegalStateException("not supported join " + optExpr.getOp());
    }
    List<ScalarOperator> onPredicates = Utils.extractConjuncts(onPredicate);
    ColumnRefSet leftChildColumns = optExpr.inputAt(0).getOutputColumns();
    ColumnRefSet rightChildColumns = optExpr.inputAt(1).getOutputColumns();
    List<BinaryPredicateOperator> eqOnPredicates = JoinHelper.getEqualsPredicate(
            leftChildColumns, rightChildColumns, onPredicates);
    Preconditions.checkState(!eqOnPredicates.isEmpty(), "must be eq-join");
    // Normalize each equality predicate so its left operand refers to the left child.
    for (BinaryPredicateOperator s : eqOnPredicates) {
        if (!optExpr.inputAt(0).getLogicalProperty().getOutputColumns()
                .containsAll(s.getChild(0).getUsedColumns())) {
            s.swap();
        }
    }
    List<Expr> eqJoinConjuncts =
            eqOnPredicates.stream().map(e -> ScalarOperatorToExpr.buildExecExpression(e,
                            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
                    .collect(Collectors.toList());
    for (Expr expr : eqJoinConjuncts) {
        if (expr.isConstant()) {
            throw unsupportedException("Support join on constant predicate later");
        }
    }
    // Non-equality ON conjuncts: copy the already-extracted conjunct list instead
    // of re-extracting from onPredicate a second time.
    List<ScalarOperator> otherJoin = new ArrayList<>(onPredicates);
    otherJoin.removeAll(eqOnPredicates);
    List<Expr> otherJoinConjuncts = otherJoin.stream().map(e -> ScalarOperatorToExpr.buildExecExpression(e,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
            .collect(Collectors.toList());
    List<ScalarOperator> predicates = Utils.extractConjuncts(predicate);
    List<Expr> conjuncts = predicates.stream().map(e -> ScalarOperatorToExpr.buildExecExpression(e,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
            .collect(Collectors.toList());
    ColumnRefSet requiredColsForFilter = new ColumnRefSet();
    // forEach directly on the collections; no need to open a stream for a side effect.
    otherJoin.forEach(e -> requiredColsForFilter.union(e.getUsedColumns()));
    predicates.forEach(e -> requiredColsForFilter.union(e.getUsedColumns()));
    return new JoinExprInfo(eqJoinConjuncts, otherJoinConjuncts, conjuncts, requiredColsForFilter);
}
@Override
public PlanFragment visitPhysicalStreamJoin(OptExpression optExpr, ExecPlan context) {
    PhysicalStreamJoinOperator node = (PhysicalStreamJoinOperator) optExpr.getOp();
    PlanFragment leftFragment = visit(optExpr.inputAt(0), context);
    PlanFragment rightFragment = visit(optExpr.inputAt(1), context);
    // Only inner join is supported; the outer-join slot-nullability fix-up that
    // previously followed this guard was unreachable and has been removed, along
    // with the unused left/right child column sets.
    if (!node.getJoinType().isInnerJoin()) {
        throw new NotImplementedException("Only inner join is supported");
    }
    PlanNode leftFragmentPlanRoot = leftFragment.getPlanRoot();
    PlanNode rightFragmentPlanRoot = rightFragment.getPlanRoot();
    // Stream joins are always executed as shuffle (hash-bucket) joins for now.
    JoinNode.DistributionMode distributionMode = JoinNode.DistributionMode.SHUFFLE_HASH_BUCKET;
    JoinExprInfo joinExpr = buildJoinExpr(optExpr, context);
    List<Expr> eqJoinConjuncts = joinExpr.eqJoinConjuncts;
    List<Expr> otherJoinConjuncts = joinExpr.otherJoin;
    List<Expr> conjuncts = joinExpr.conjuncts;
    JoinNode joinNode =
            new StreamJoinNode(context.getNextNodeId(), leftFragmentPlanRoot, rightFragmentPlanRoot,
                    node.getJoinType(), eqJoinConjuncts, otherJoinConjuncts);
    // Restrict the join's output slots to what the projection and filters need.
    fillSlotsInfo(node.getProjection(), joinNode, optExpr, joinExpr.requiredColsForFilter);
    joinNode.setDistributionMode(distributionMode);
    joinNode.getConjuncts().addAll(conjuncts);
    joinNode.setLimit(node.getLimit());
    joinNode.computeStatistics(optExpr.getStatistics());
    return buildJoinFragment(context, leftFragment, rightFragment, distributionMode, joinNode);
}
@NotNull
private PlanFragment buildJoinFragment(ExecPlan context, PlanFragment leftFragment, PlanFragment rightFragment,
                                       JoinNode.DistributionMode distributionMode, JoinNode joinNode) {
    // Dispatch on the distribution mode; each arm assembles the final join
    // fragment and registers it with the plan context.
    switch (distributionMode) {
        case BROADCAST: {
            // Fold the broadcast (right) side into the left fragment.
            setJoinPushDown(joinNode);
            rightFragment.getPlanRoot().setFragment(leftFragment);
            context.getFragments().remove(rightFragment);
            context.getFragments().remove(leftFragment);
            context.getFragments().add(leftFragment);
            leftFragment.setPlanRoot(joinNode);
            leftFragment.addChildren(rightFragment.getChildren());
            leftFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
            return leftFragment;
        }
        case PARTITIONED: {
            // Both sides are re-hashed; the join lives in a brand new fragment.
            DataPartition lhsJoinPartition = new DataPartition(TPartitionType.HASH_PARTITIONED,
                    leftFragment.getDataPartition().getPartitionExprs());
            DataPartition rhsJoinPartition = new DataPartition(TPartitionType.HASH_PARTITIONED,
                    rightFragment.getDataPartition().getPartitionExprs());
            leftFragment.getChild(0).setOutputPartition(lhsJoinPartition);
            rightFragment.getChild(0).setOutputPartition(rhsJoinPartition);
            context.getFragments().remove(leftFragment);
            context.getFragments().remove(rightFragment);
            PlanFragment joinFragment = new PlanFragment(context.getNextFragmentId(),
                    joinNode, lhsJoinPartition);
            joinFragment.addChildren(leftFragment.getChildren());
            joinFragment.addChildren(rightFragment.getChildren());
            joinFragment.mergeQueryGlobalDicts(leftFragment.getQueryGlobalDicts());
            joinFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
            context.getFragments().add(joinFragment);
            return joinFragment;
        }
        case COLOCATE:
        case REPLICATED: {
            if (distributionMode == JoinNode.DistributionMode.COLOCATE) {
                joinNode.setColocate(true, "");
            } else {
                joinNode.setReplicated(true);
            }
            setJoinPushDown(joinNode);
            // Merge the right fragment into the left; no exchange is needed.
            joinNode.setChild(0, leftFragment.getPlanRoot());
            joinNode.setChild(1, rightFragment.getPlanRoot());
            leftFragment.setPlanRoot(joinNode);
            leftFragment.addChildren(rightFragment.getChildren());
            context.getFragments().remove(rightFragment);
            context.getFragments().remove(leftFragment);
            context.getFragments().add(leftFragment);
            leftFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
            return leftFragment;
        }
        case SHUFFLE_HASH_BUCKET: {
            setJoinPushDown(joinNode);
            boolean leftIsExchange = leftFragment.getPlanRoot() instanceof ExchangeNode;
            boolean rightIsExchange = rightFragment.getPlanRoot() instanceof ExchangeNode;
            if (!leftIsExchange && !rightIsExchange) {
                joinNode.setChild(0, leftFragment.getPlanRoot());
                joinNode.setChild(1, rightFragment.getPlanRoot());
                leftFragment.setPlanRoot(joinNode);
                leftFragment.addChildren(rightFragment.getChildren());
                context.getFragments().remove(rightFragment);
                context.getFragments().remove(leftFragment);
                context.getFragments().add(leftFragment);
                leftFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
                return leftFragment;
            }
            if (leftIsExchange && !rightIsExchange) {
                return computeShuffleHashBucketPlanFragment(context, rightFragment,
                        leftFragment, joinNode);
            }
            return computeShuffleHashBucketPlanFragment(context, leftFragment,
                    rightFragment, joinNode);
        }
        default: {
            // LOCAL_HASH_BUCKET and any remaining modes: bucket-shuffle join.
            setJoinPushDown(joinNode);
            if (leftFragment.getPlanRoot() instanceof ExchangeNode &&
                    !(rightFragment.getPlanRoot() instanceof ExchangeNode)) {
                return computeBucketShufflePlanFragment(context, rightFragment,
                        leftFragment, joinNode);
            }
            return computeBucketShufflePlanFragment(context, leftFragment,
                    rightFragment, joinNode);
        }
    }
}
@NotNull
private JoinNode.DistributionMode inferDistributionMode(OptExpression optExpr, PlanNode leftFragmentPlanRoot,
                                                        PlanNode rightFragmentPlanRoot) {
    // Early-return form of the original cascaded assignment; check order is preserved.
    if (isExchangeWithDistributionType(leftFragmentPlanRoot, DistributionSpec.DistributionType.SHUFFLE)
            && isExchangeWithDistributionType(rightFragmentPlanRoot,
                    DistributionSpec.DistributionType.SHUFFLE)) {
        return JoinNode.DistributionMode.PARTITIONED;
    }
    if (isExchangeWithDistributionType(rightFragmentPlanRoot,
            DistributionSpec.DistributionType.BROADCAST)) {
        return JoinNode.DistributionMode.BROADCAST;
    }
    if (!(leftFragmentPlanRoot instanceof ExchangeNode)
            && !(rightFragmentPlanRoot instanceof ExchangeNode)) {
        // Neither side has an exchange: colocate, replicated or shuffle-bucket.
        if (isColocateJoin(optExpr)) {
            return HashJoinNode.DistributionMode.COLOCATE;
        }
        if (ConnectContext.get().getSessionVariable().isEnableReplicationJoin()
                && rightFragmentPlanRoot.canDoReplicatedJoin()) {
            return JoinNode.DistributionMode.REPLICATED;
        }
        if (isShuffleJoin(optExpr)) {
            return JoinNode.DistributionMode.SHUFFLE_HASH_BUCKET;
        }
        Preconditions.checkState(false, "Must be colocate/bucket/replicate join");
        return JoinNode.DistributionMode.COLOCATE;
    }
    if (isShuffleJoin(optExpr)) {
        return JoinNode.DistributionMode.SHUFFLE_HASH_BUCKET;
    }
    return JoinNode.DistributionMode.LOCAL_HASH_BUCKET;
}
@Override
public PlanFragment visitPhysicalStreamAgg(OptExpression optExpr, ExecPlan context) {
    PhysicalStreamAggOperator aggOp = (PhysicalStreamAggOperator) optExpr.getOp();
    PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
    // A single tuple descriptor serves as both intermediate and output layout.
    TupleDescriptor aggTuple = context.getDescTbl().createTupleDescriptor();
    AggregateExprInfo aggExprInfo =
            buildAggregateTuple(aggOp.getAggregations(), aggOp.getGroupBys(), null, aggTuple, context);
    AggregateInfo aggInfo = AggregateInfo.create(
            aggExprInfo.groupExpr, aggExprInfo.aggregateExpr, aggTuple, aggTuple,
            AggregateInfo.AggPhase.FIRST);
    StreamAggNode aggNode = new StreamAggNode(context.getNextNodeId(), inputFragment.getPlanRoot(), aggInfo);
    aggNode.setHasNullableGenerateChild();
    aggNode.computeStatistics(optExpr.getStatistics());
    inputFragment.setPlanRoot(aggNode);
    return inputFragment;
}
@Override
public PlanFragment visitPhysicalStreamScan(OptExpression optExpr, ExecPlan context) {
    PhysicalStreamScanOperator scanOp = (PhysicalStreamScanOperator) optExpr.getOp();
    OlapTable scanTable = (OlapTable) scanOp.getTable();
    context.getDescTbl().addReferencedTable(scanTable);
    TupleDescriptor tupleDesc = context.getDescTbl().createTupleDescriptor();
    tupleDesc.setTable(scanTable);
    BinlogScanNode scanNode = new BinlogScanNode(context.getNextNodeId(), tupleDesc);
    scanNode.computeStatistics(optExpr.getStatistics());
    try {
        scanNode.computeScanRanges();
    } catch (UserException e) {
        throw new StarRocksPlannerException(
                "Failed to compute scan ranges for StreamScanNode, " + e.getMessage(), INTERNAL_ERROR);
    }
    // Create one slot per referenced column and make it resolvable by later exprs.
    for (Map.Entry<ColumnRefOperator, Column> entry : scanOp.getColRefToColumnMetaMap().entrySet()) {
        ColumnRefOperator colRef = entry.getKey();
        Column column = entry.getValue();
        SlotDescriptor slotDesc =
                context.getDescTbl().addSlotDescriptor(tupleDesc, new SlotId(colRef.getId()));
        slotDesc.setColumn(column);
        slotDesc.setIsNullable(column.isAllowNull());
        slotDesc.setIsMaterialized(true);
        context.getColRefToExpr().put(colRef, new SlotRef(colRef.toString(), slotDesc));
    }
    // Attach the scan predicate as conjuncts on the scan node.
    ScalarOperatorToExpr.FormatterContext formatterContext =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
    Utils.extractConjuncts(scanOp.getPredicate()).forEach(predicate ->
            scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext)));
    tupleDesc.computeMemLayout();
    context.getScanNodes().add(scanNode);
    PlanFragment fragment = new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
private void fillSlotsInfo(Projection projection, JoinNode joinNode, OptExpression optExpr,
                           ColumnRefSet requiredColsForFilter) {
    // Without a projection the join keeps its default output; nothing to restrict.
    if (projection == null) {
        return;
    }
    ColumnRefSet outputCols = new ColumnRefSet();
    projection.getColumnRefMap().values().forEach(op -> outputCols.union(op.getUsedColumns()));
    projection.getCommonSubOperatorMap().values().forEach(op -> outputCols.union(op.getUsedColumns()));
    // Common sub-operator slots are intermediates of the projection, not join outputs.
    outputCols.except(new ArrayList<>(projection.getCommonSubOperatorMap().keySet()));
    outputCols.union(requiredColsForFilter);
    if (outputCols.isEmpty()) {
        // Fall back to the first column of the right child when nothing is referenced.
        outputCols.union(optExpr.inputAt(1).getOutputColumns().getFirstId());
    }
    joinNode.setOutputSlots(outputCols.getStream().collect(Collectors.toList()));
}
@Override
public PlanFragment visitPhysicalTableFunctionTableScan(OptExpression optExpression, ExecPlan context) {
    PhysicalTableFunctionTableScanOperator scanOp =
            (PhysicalTableFunctionTableScanOperator) optExpression.getOp();
    TableFunctionTable table = (TableFunctionTable) scanOp.getTable();
    TupleDescriptor tupleDesc = buildTupleDesc(context, table);
    // All files of the table function go into a single file-status group.
    List<List<TBrokerFileStatus>> fileStatusGroups = new ArrayList<>();
    fileStatusGroups.add(table.fileList());
    FileScanNode scanNode = new FileScanNode(context.getNextNodeId(), tupleDesc,
            "FileScanNode", fileStatusGroups, table.fileList().size());
    List<BrokerFileGroup> fileGroups = new ArrayList<>();
    try {
        fileGroups.add(new BrokerFileGroup(table));
    } catch (UserException e) {
        throw new StarRocksPlannerException(
                "Build Exec FileScanNode fail, scan info is invalid," + e.getMessage(),
                INTERNAL_ERROR);
    }
    prepareContextSlots(scanOp, context, tupleDesc);
    scanNode.setLoadInfo(-1, -1, table, new BrokerDesc(table.getProperties()), fileGroups, false, 1);
    scanNode.setUseVectorizedLoad(true);
    Analyzer analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), context.getConnectContext());
    try {
        scanNode.init(analyzer);
        scanNode.finalizeStats(analyzer);
    } catch (UserException e) {
        throw new StarRocksPlannerException(
                "Build Exec FileScanNode fail, scan info is invalid," + e.getMessage(),
                INTERNAL_ERROR);
    }
    // Attach the scan predicate as conjuncts on the scan node.
    ScalarOperatorToExpr.FormatterContext formatterContext =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
    Utils.extractConjuncts(scanOp.getPredicate()).forEach(predicate ->
            scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext)));
    scanNode.setLimit(scanOp.getLimit());
    scanNode.computeStatistics(optExpression.getStatistics());
    context.getScanNodes().add(scanNode);
    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
}
} | class PlanFragmentBuilder {
private static final Logger LOG = LogManager.getLogger(PlanFragmentBuilder.class);
/**
 * Builds the executable physical plan: translates the optimized expression tree
 * into fragments, wires the output fragment, and finalizes sinks.
 */
public static ExecPlan createPhysicalPlan(OptExpression plan, ConnectContext connectContext,
                                          List<ColumnRefOperator> outputColumns, ColumnRefFactory columnRefFactory,
                                          List<String> colNames,
                                          TResultSinkType resultSinkType,
                                          boolean hasOutputFragment) {
    ExecPlan execPlan = new ExecPlan(connectContext, colNames, plan, outputColumns);
    PlanFragment topFragment = new PhysicalPlanTranslator(columnRefFactory).translate(plan, execPlan);
    createOutputFragment(topFragment, execPlan, outputColumns, hasOutputFragment);
    execPlan.setPlanCount(plan.getPlanCount());
    return finalizeFragments(execPlan, resultSinkType);
}
/**
 * Builds the maintenance (refresh) plan for an incremental materialized view and
 * attaches an OlapTableSink writing into the MV's sink table.
 * NOTE(review): statement order is significant here (sinks are created before the
 * fragment list is reversed); do not reorder casually.
 */
public static ExecPlan createPhysicalPlanForMV(ConnectContext connectContext,
                                               CreateMaterializedViewStatement createStmt,
                                               OptExpression optExpr,
                                               LogicalPlan logicalPlan,
                                               QueryRelation queryRelation,
                                               ColumnRefFactory columnRefFactory) throws DdlException {
    List<String> colNames = queryRelation.getColumnOutputNames();
    List<ColumnRefOperator> outputColumns = logicalPlan.getOutputColumn();
    ExecPlan execPlan = new ExecPlan(connectContext, colNames, optExpr, outputColumns);
    // Translation registers all fragments on execPlan as a side effect.
    PlanFragment planFragment = new PhysicalPlanTranslator(columnRefFactory).translate(optExpr, execPlan);
    execPlan.setPlanCount(optExpr.getPlanCount());
    createStmt.setMaintenancePlan(execPlan, columnRefFactory);
    for (PlanFragment fragment : execPlan.getFragments()) {
        fragment.createDataSink(TResultSinkType.MYSQL_PROTOCAL);
    }
    // Fragments are produced leaf-first; reverse so the top fragment comes first.
    Collections.reverse(execPlan.getFragments());
    PartitionInfo partitionInfo = LocalMetastore.buildPartitionInfo(createStmt);
    long mvId = GlobalStateMgr.getCurrentState().getNextId();
    long dbId = GlobalStateMgr.getCurrentState().getDb(createStmt.getTableName().getDb()).getId();
    MaterializedView view =
            MaterializedViewMgr.getInstance().createSinkTable(createStmt, partitionInfo, mvId, dbId);
    TupleDescriptor tupleDesc = buildTupleDesc(execPlan, view);
    view.setMaintenancePlan(execPlan);
    // Placeholder partition ids for the sink; presumably resolved at execution
    // time — TODO(review): confirm.
    List<Long> fakePartitionIds = Arrays.asList(1L, 2L, 3L);
    DataSink tableSink = new OlapTableSink(view, tupleDesc, fakePartitionIds, true,
            view.writeQuorum(), view.enableReplicatedStorage(), false, false);
    execPlan.getTopFragment().setSink(tableSink);
    return execPlan;
}
/**
 * Creates a tuple descriptor with one materialized slot per column of the
 * table's full schema, with the memory layout computed.
 */
public static TupleDescriptor buildTupleDesc(ExecPlan execPlan, Table table) {
    DescriptorTable descTbl = execPlan.getDescTbl();
    TupleDescriptor tuple = descTbl.createTupleDescriptor();
    table.getFullSchema().forEach(column -> {
        SlotDescriptor slot = descTbl.addSlotDescriptor(tuple);
        slot.setIsMaterialized(true);
        slot.setType(column.getType());
        slot.setColumn(column);
        slot.setIsNullable(column.isAllowNull());
    });
    tuple.computeMemLayout();
    return tuple;
}
/**
 * Sets the plan's output expressions and, when needed, adds an unpartitioned
 * exchange fragment on top so results are merged on a single instance.
 */
private static void createOutputFragment(PlanFragment inputFragment, ExecPlan execPlan,
                                         List<ColumnRefOperator> outputColumns,
                                         boolean hasOutputFragment) {
    // No extra fragment needed: the top is already an exchange, the fragment is
    // unpartitioned, or the caller asked for no output fragment.
    if (inputFragment.getPlanRoot() instanceof ExchangeNode || !inputFragment.isPartitioned() ||
            !hasOutputFragment) {
        List<Expr> outputExprs = outputColumns.stream().map(variable -> ScalarOperatorToExpr
                .buildExecExpression(variable,
                        new ScalarOperatorToExpr.FormatterContext(execPlan.getColRefToExpr()))
        ).collect(Collectors.toList());
        inputFragment.setOutputExprs(outputExprs);
        execPlan.getOutputExprs().addAll(outputExprs);
        return;
    }
    List<Expr> outputExprs = outputColumns.stream().map(variable -> ScalarOperatorToExpr
            .buildExecExpression(variable, new ScalarOperatorToExpr.FormatterContext(execPlan.getColRefToExpr())))
            .collect(Collectors.toList());
    execPlan.getOutputExprs().addAll(outputExprs);
    // Single-tablet OLAP-only plans (and no compute nodes, no local bucket-shuffle
    // right/full join) can return results directly without a merge exchange.
    // orElse(2) makes an empty scan-node list fail the <= 1 check.
    if (!enableComputeNode(execPlan)
            && !inputFragment.hashLocalBucketShuffleRightOrFullJoin(inputFragment.getPlanRoot())
            && execPlan.getScanNodes().stream().allMatch(d -> d instanceof OlapScanNode)
            && execPlan.getScanNodes().stream().map(d -> ((OlapScanNode) d).getScanTabletIds().size())
            .reduce(Integer::sum).orElse(2) <= 1) {
        inputFragment.setOutputExprs(outputExprs);
        return;
    }
    // Otherwise merge everything through a single-instance unpartitioned exchange.
    ExchangeNode exchangeNode =
            new ExchangeNode(execPlan.getNextNodeId(), inputFragment.getPlanRoot(), DataPartition.UNPARTITIONED);
    exchangeNode.setNumInstances(1);
    PlanFragment exchangeFragment =
            new PlanFragment(execPlan.getNextFragmentId(), exchangeNode, DataPartition.UNPARTITIONED);
    inputFragment.setDestination(exchangeNode);
    inputFragment.setOutputPartition(DataPartition.UNPARTITIONED);
    exchangeFragment.setOutputExprs(outputExprs);
    execPlan.getFragments().add(exchangeFragment);
}
/** Returns whether the session has query cache enabled for this plan. */
private static boolean useQueryCache(ExecPlan execPlan) {
    // Collapsed the redundant if (!flag) return false; return true; long-hand.
    return execPlan.getConnectContext().getSessionVariable().isEnableQueryCache();
}
/**
 * Final pass over the translated fragments: creates data sinks, reverses the
 * fragment list into top-first order, computes local runtime-filter waiting
 * sets, and applies either query-cache normalization or adaptive DOP.
 * NOTE(review): sinks must be created before the list is reversed.
 */
private static ExecPlan finalizeFragments(ExecPlan execPlan, TResultSinkType resultSinkType) {
    List<PlanFragment> fragments = execPlan.getFragments();
    for (PlanFragment fragment : fragments) {
        fragment.createDataSink(resultSinkType);
    }
    // Fragments were appended leaf-first during translation; top fragment first now.
    Collections.reverse(fragments);
    // Local runtime filters are cleared when the pipeline engine is on but
    // global runtime filters are disabled.
    boolean shouldClearRuntimeFilters = ConnectContext.get() != null &&
            !ConnectContext.get().getSessionVariable().getEnableGlobalRuntimeFilter() &&
            ConnectContext.get().getSessionVariable().isEnablePipelineEngine();
    for (PlanFragment fragment : fragments) {
        fragment.computeLocalRfWaitingSet(fragment.getPlanRoot(), shouldClearRuntimeFilters);
    }
    if (useQueryCache(execPlan)) {
        // Normalize each fragment so semantically equal plans hit the same cache key.
        for (PlanFragment fragment : execPlan.getFragments()) {
            FragmentNormalizer normalizer = new FragmentNormalizer(execPlan, fragment);
            normalizer.normalize();
        }
    } else if (ConnectContext.get() != null &&
            ConnectContext.get().getSessionVariable().isEnableRuntimeAdaptiveDop()) {
        for (PlanFragment fragment : fragments) {
            if (fragment.canUseRuntimeAdaptiveDop()) {
                fragment.enableAdaptiveDop();
            }
        }
    }
    return execPlan;
}
private static void maybeClearOlapScanNodePartitions(PlanFragment fragment) {
    List<OlapScanNode> scanNodes = fragment.collectOlapScanNodes();
    long withBucketColumns = scanNodes.stream()
            .filter(node -> !node.getBucketColumns().isEmpty())
            .count();
    // Clear only in the mixed case: some scans expose bucket columns, some do not.
    boolean mixed = withBucketColumns > 0 && withBucketColumns < scanNodes.size();
    if (mixed) {
        clearOlapScanNodePartitions(fragment.getPlanRoot());
    }
}
/**
 * Clears the bucket exprs/columns of every OlapScanNode reachable from {@code root}
 * without crossing an ExchangeNode (i.e. within the same fragment).
 * <p>
 * When OlapScanNode partition exprs reach the BE, downstream operators use them as
 * local shuffle partition exprs; otherwise they use their original partition exprs
 * (group-by or join-on keys). Bucket keys satisfy the required hash property of
 * blocking aggregation except when the scan has a single tablet after pruning or
 * runs on a single BE — e.g. ScanNode(k1)->LocalShuffle(c1)->BlockingAgg(c1) —
 * where the bucket keys must be cleared so the BE falls back to the group-by keys.
 *
 * @param root root node of the fragment whose OlapScanNode bucket keys are cleared
 */
private static void clearOlapScanNodePartitions(PlanNode root) {
    if (root instanceof ExchangeNode) {
        // Do not descend into other fragments.
        return;
    }
    if (root instanceof OlapScanNode) {
        OlapScanNode scanNode = (OlapScanNode) root;
        scanNode.setBucketExprs(Lists.newArrayList());
        scanNode.setBucketColumns(Lists.newArrayList());
        return;
    }
    root.getChildren().forEach(child -> clearOlapScanNodePartitions(child));
}
private static class PhysicalPlanTranslator extends OptExpressionVisitor<PlanFragment, ExecPlan> {
private final ColumnRefFactory columnRefFactory;
private final IdGenerator<RuntimeFilterId> runtimeFilterIdIdGenerator = RuntimeFilterId.createGenerator();
private boolean canUseLocalShuffleAgg = true;
public PhysicalPlanTranslator(ColumnRefFactory columnRefFactory) {
    // Factory used to resolve column ids back to ColumnRefOperators during translation.
    this.columnRefFactory = columnRefFactory;
}
/** Entry point: translates an optimized expression tree into its top PlanFragment. */
public PlanFragment translate(OptExpression optExpression, ExecPlan context) {
    return visit(optExpression, context);
}
@Override
public PlanFragment visit(OptExpression optExpression, ExecPlan context) {
    // Any node with more than one child permanently disables local-shuffle agg.
    canUseLocalShuffleAgg &= optExpression.arity() <= 1;
    PlanFragment fragment = optExpression.getOp().accept(this, optExpression, context);
    Projection projection = optExpression.getOp().getProjection();
    // Append a project node on top of the fragment when the operator carries one.
    return projection == null
            ? fragment
            : buildProjectNode(optExpression, projection, fragment, context);
}
/**
 * Marks columns that are referenced only by simple strict predicates (and not by
 * complex predicates or the scan output) as unused, so the scan stage can skip
 * materializing them. No-op when the session disables the optimization, when an
 * aggregate-family index is read without pre-aggregation, or when the scan has
 * no output columns.
 */
private void setUnUsedOutputColumns(PhysicalOlapScanOperator node, OlapScanNode scanNode,
                                    List<ScalarOperator> predicates, OlapTable referenceTable) {
    if (!ConnectContext.get().getSessionVariable().isEnableFilterUnusedColumnsInScanStage()) {
        return;
    }
    MaterializedIndexMeta materializedIndexMeta =
            referenceTable.getIndexMetaByIndexId(node.getSelectedIndexId());
    if (materializedIndexMeta.getKeysType().isAggregationFamily() && !node.isPreAggregation()) {
        return;
    }
    List<ColumnRefOperator> outputColumns = node.getOutputColumns();
    if (outputColumns.isEmpty()) {
        return;
    }
    Set<Integer> outputColumnIds = outputColumns.stream()
            .map(ColumnRefOperator::getId)
            .collect(Collectors.toSet());
    // Value (non-key) column names for aggregate-family and primary-key indexes.
    Set<String> aggOrPrimaryKeyTableValueColumnNames = new HashSet<>();
    if (materializedIndexMeta.getKeysType().isAggregationFamily() ||
            materializedIndexMeta.getKeysType() == KeysType.PRIMARY_KEYS) {
        aggOrPrimaryKeyTableValueColumnNames =
                materializedIndexMeta.getSchema().stream()
                        .filter(col -> !col.isKey())
                        .map(Column::getName)
                        .collect(Collectors.toSet());
    }
    // Partition predicate columns into "simple strict" (prunable) and complex ones.
    Set<Integer> singlePredColumnIds = new HashSet<>();
    Set<Integer> complexPredColumnIds = new HashSet<>();
    for (ScalarOperator predicate : predicates) {
        Set<Integer> target = DecodeVisitor.isSimpleStrictPredicate(predicate)
                ? singlePredColumnIds
                : complexPredColumnIds;
        for (int cid : predicate.getUsedColumns().getColumnIds()) {
            target.add(cid);
        }
    }
    Set<Integer> unUsedOutputColumnIds = new HashSet<>();
    Map<Integer, Integer> dictStringIdToIntIds = node.getDictStringIdToIntIds();
    for (Integer cid : singlePredColumnIds) {
        // Map a dict-encoded string column id to its int dict id when present.
        Integer newCid = dictStringIdToIntIds.getOrDefault(cid, cid);
        if (!complexPredColumnIds.contains(newCid) && !outputColumnIds.contains(newCid)) {
            unUsedOutputColumnIds.add(newCid);
        }
    }
    scanNode.setUnUsedOutputStringColumns(unUsedOutputColumnIds, aggOrPrimaryKeyTableValueColumnNames);
}
/**
 * Translates a standalone project operator into a ProjectNode on top of the
 * child's fragment. Common sub-expressions get non-materialized slots; output
 * expressions get materialized slots.
 * NOTE(review): this closely mirrors buildProjectNode; keep the two in sync.
 */
@Override
public PlanFragment visitPhysicalProject(OptExpression optExpr, ExecPlan context) {
    PhysicalProjectOperator node = (PhysicalProjectOperator) optExpr.getOp();
    PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
    Preconditions.checkState(!node.getColumnRefMap().isEmpty());
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    // Common sub-expressions: evaluated once, referenced by the projection below.
    Map<SlotId, Expr> commonSubOperatorMap = Maps.newHashMap();
    for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getCommonSubOperatorMap().entrySet()) {
        Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(),
                        node.getCommonSubOperatorMap()));
        commonSubOperatorMap.put(new SlotId(entry.getKey().getId()), expr);
        SlotDescriptor slotDescriptor =
                context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
        slotDescriptor.setIsNullable(expr.isNullable());
        // Intermediate slots are not materialized in the output tuple.
        slotDescriptor.setIsMaterialized(false);
        slotDescriptor.setType(expr.getType());
        context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
    }
    // Actual projection outputs: one materialized slot per output column.
    Map<SlotId, Expr> projectMap = Maps.newHashMap();
    for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getColumnRefMap().entrySet()) {
        Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(), node.getColumnRefMap()));
        projectMap.put(new SlotId(entry.getKey().getId()), expr);
        SlotDescriptor slotDescriptor =
                context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
        slotDescriptor.setIsNullable(expr.isNullable());
        slotDescriptor.setIsMaterialized(true);
        slotDescriptor.setType(expr.getType());
        context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
    }
    ProjectNode projectNode =
            new ProjectNode(context.getNextNodeId(),
                    tupleDescriptor,
                    inputFragment.getPlanRoot(),
                    projectMap,
                    commonSubOperatorMap);
    projectNode.setHasNullableGenerateChild();
    projectNode.computeStatistics(optExpr.getStatistics());
    // A nullable-generating child may widen slot nullability after the fact.
    for (SlotId sid : projectMap.keySet()) {
        SlotDescriptor slotDescriptor = tupleDescriptor.getSlot(sid.asInt());
        slotDescriptor.setIsNullable(slotDescriptor.getIsNullable() | projectNode.isHasNullableGenerateChild());
    }
    tupleDescriptor.computeMemLayout();
    projectNode.setLimit(inputFragment.getPlanRoot().getLimit());
    inputFragment.setPlanRoot(projectNode);
    return inputFragment;
}
/**
 * Appends a ProjectNode for a projection attached to another operator (see
 * {@code visit}). Unlike visitPhysicalProject, this tolerates a null projection
 * and trims the statistics to the projection's output columns.
 * NOTE(review): this closely mirrors visitPhysicalProject; keep the two in sync.
 */
public PlanFragment buildProjectNode(OptExpression optExpression, Projection node, PlanFragment inputFragment,
                                     ExecPlan context) {
    if (node == null) {
        return inputFragment;
    }
    Preconditions.checkState(!node.getColumnRefMap().isEmpty());
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    // Common sub-expressions: evaluated once, referenced by the projection below.
    Map<SlotId, Expr> commonSubOperatorMap = Maps.newHashMap();
    for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getCommonSubOperatorMap().entrySet()) {
        Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(),
                        node.getCommonSubOperatorMap()));
        commonSubOperatorMap.put(new SlotId(entry.getKey().getId()), expr);
        SlotDescriptor slotDescriptor =
                context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
        slotDescriptor.setIsNullable(expr.isNullable());
        // Intermediate slots are not materialized in the output tuple.
        slotDescriptor.setIsMaterialized(false);
        slotDescriptor.setType(expr.getType());
        context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
    }
    // Actual projection outputs: one materialized slot per output column.
    Map<SlotId, Expr> projectMap = Maps.newHashMap();
    for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getColumnRefMap().entrySet()) {
        Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(), node.getColumnRefMap()));
        projectMap.put(new SlotId(entry.getKey().getId()), expr);
        SlotDescriptor slotDescriptor =
                context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
        slotDescriptor.setIsNullable(expr.isNullable());
        slotDescriptor.setIsMaterialized(true);
        slotDescriptor.setType(expr.getType());
        context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
    }
    ProjectNode projectNode =
            new ProjectNode(context.getNextNodeId(),
                    tupleDescriptor,
                    inputFragment.getPlanRoot(),
                    projectMap,
                    commonSubOperatorMap);
    projectNode.setHasNullableGenerateChild();
    // Trim statistics to the columns this projection actually outputs.
    Statistics statistics = optExpression.getStatistics();
    Statistics.Builder b = Statistics.builder();
    b.setOutputRowCount(statistics.getOutputRowCount());
    b.addColumnStatisticsFromOtherStatistic(statistics, new ColumnRefSet(node.getOutputColumns()));
    projectNode.computeStatistics(b.build());
    // A nullable-generating child may widen slot nullability after the fact.
    for (SlotId sid : projectMap.keySet()) {
        SlotDescriptor slotDescriptor = tupleDescriptor.getSlot(sid.asInt());
        slotDescriptor.setIsNullable(slotDescriptor.getIsNullable() | projectNode.isHasNullableGenerateChild());
    }
    tupleDescriptor.computeMemLayout();
    projectNode.setLimit(inputFragment.getPlanRoot().getLimit());
    inputFragment.setPlanRoot(projectNode);
    return inputFragment;
}
/**
 * Translates a decode operator into a DecodeNode that turns dictionary-encoded
 * (int) slots back into VARCHAR slots; non-dict slots are copied through.
 */
@Override
public PlanFragment visitPhysicalDecode(OptExpression optExpression, ExecPlan context) {
    PhysicalDecodeOperator node = (PhysicalDecodeOperator) optExpression.getOp();
    PlanFragment inputFragment = visit(optExpression.inputAt(0), context);
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    for (TupleId tupleId : inputFragment.getPlanRoot().getTupleIds()) {
        TupleDescriptor childTuple = context.getDescTbl().getTupleDesc(tupleId);
        ArrayList<SlotDescriptor> slots = childTuple.getSlots();
        for (SlotDescriptor slot : slots) {
            int slotId = slot.getId().asInt();
            boolean isNullable = slot.getIsNullable();
            if (node.getDictToStrings().containsKey(slotId)) {
                // Dict-encoded slot: create a VARCHAR slot under the decoded string id.
                Integer stringSlotId = node.getDictToStrings().get(slotId);
                SlotDescriptor slotDescriptor =
                        context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(stringSlotId));
                slotDescriptor.setIsNullable(isNullable);
                slotDescriptor.setIsMaterialized(true);
                slotDescriptor.setType(Type.VARCHAR);
                context.getColRefToExpr().put(new ColumnRefOperator(stringSlotId, Type.VARCHAR,
                                "<dict-code>", slotDescriptor.getIsNullable()),
                        new SlotRef(stringSlotId.toString(), slotDescriptor));
            } else {
                // Non-dict slot: copy through under its original slot id.
                SlotDescriptor slotDescriptor = new SlotDescriptor(slot.getId(), tupleDescriptor, slot);
                tupleDescriptor.addSlot(slotDescriptor);
            }
        }
    }
    // String functions that must be evaluated on the decoded values.
    Map<SlotId, Expr> projectMap = Maps.newHashMap();
    for (Map.Entry<ColumnRefOperator, ScalarOperator> entry : node.getStringFunctions().entrySet()) {
        Expr expr = ScalarOperatorToExpr.buildExecExpression(entry.getValue(),
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr(),
                        node.getStringFunctions()));
        projectMap.put(new SlotId(entry.getKey().getId()), expr);
        Preconditions.checkState(context.getColRefToExpr().containsKey(entry.getKey()));
    }
    tupleDescriptor.computeMemLayout();
    DecodeNode decodeNode = new DecodeNode(context.getNextNodeId(),
            tupleDescriptor,
            inputFragment.getPlanRoot(),
            node.getDictToStrings(), projectMap);
    decodeNode.computeStatistics(optExpression.getStatistics());
    decodeNode.setLimit(node.getLimit());
    inputFragment.setPlanRoot(decodeNode);
    return inputFragment;
}
@Override
// Translates a native OLAP table scan into an OlapScanNode: resolves the selected index,
// partitions and tablets into concrete scan-range locations, then materializes slots and
// pushes conjuncts down to the scan.
public PlanFragment visitPhysicalOlapScan(OptExpression optExpr, ExecPlan context) {
PhysicalOlapScanOperator node = (PhysicalOlapScanOperator) optExpr.getOp();
OlapTable referenceTable = (OlapTable) node.getTable();
context.getDescTbl().addReferencedTable(referenceTable);
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(referenceTable);
OlapScanNode scanNode = new OlapScanNode(context.getNextNodeId(), tupleDescriptor, "OlapScanNode");
scanNode.setLimit(node.getLimit());
scanNode.computeStatistics(optExpr.getStatistics());
try {
scanNode.updateScanInfo(node.getSelectedPartitionId(),
node.getSelectedTabletId(),
node.getSelectedIndexId());
long selectedIndexId = node.getSelectedIndexId();
long totalTabletsNum = 0;
// -1 means "no local-replica preference"; only resolved when the config flag is on.
long localBeId = -1;
if (Config.enable_local_replica_selection) {
localBeId = GlobalStateMgr.getCurrentSystemInfo()
.getBackendIdByHost(FrontendOptions.getLocalHostAddress());
}
// Drop partitions whose tablet pruning left nothing to scan.
List<Long> selectedNonEmptyPartitionIds = node.getSelectedPartitionId().stream().filter(p -> {
List<Long> selectTabletIds = scanNode.getPartitionToScanTabletMap().get(p);
return selectTabletIds != null && !selectTabletIds.isEmpty();
}).collect(Collectors.toList());
scanNode.setSelectedPartitionIds(selectedNonEmptyPartitionIds);
for (Long partitionId : scanNode.getSelectedPartitionIds()) {
List<Long> selectTabletIds = scanNode.getPartitionToScanTabletMap().get(partitionId);
Preconditions.checkState(selectTabletIds != null && !selectTabletIds.isEmpty());
final Partition partition = referenceTable.getPartition(partitionId);
final MaterializedIndex selectedTable = partition.getIndex(selectedIndexId);
List<Long> allTabletIds = selectedTable.getTabletIdsInOrder();
// Map tablet id -> its bucket position within the index, needed for bucket-aware
// execution (e.g. colocate/bucket-shuffle joins).
Map<Long, Integer> tabletId2BucketSeq = Maps.newHashMap();
for (int i = 0; i < allTabletIds.size(); i++) {
tabletId2BucketSeq.put(allTabletIds.get(i), i);
}
// Counts ALL tablets of the index, not just the pruned selection.
totalTabletsNum += selectedTable.getTablets().size();
scanNode.setTabletId2BucketSeq(tabletId2BucketSeq);
List<Tablet> tablets =
selectTabletIds.stream().map(selectedTable::getTablet).collect(Collectors.toList());
scanNode.addScanRangeLocations(partition, selectedTable, tablets, localBeId);
}
scanNode.setTotalTabletsNum(totalTabletsNum);
} catch (UserException e) {
throw new StarRocksPlannerException(
"Build Exec OlapScanNode fail, scan info is invalid," + e.getMessage(),
INTERNAL_ERROR);
}
// Materialize one slot per referenced column and register its SlotRef for expression
// translation below. Complex types may have been pruned to a narrower type by the optimizer.
for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setColumn(entry.getValue());
slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
slotDescriptor.setIsMaterialized(true);
if (slotDescriptor.getOriginType().isComplexType()) {
slotDescriptor.setOriginType(entry.getKey().getType());
slotDescriptor.setType(entry.getKey().getType());
}
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
scanNode.setColumnAccessPaths(node.getColumnAccessPaths());
// Push conjuncts and pruned-partition predicates down to the scan node.
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator predicate : predicates) {
scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
for (ScalarOperator predicate : node.getPrunedPartitionPredicates()) {
scanNode.getPrunedPartitionPredicates()
.add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
tupleDescriptor.computeMemLayout();
setUnUsedOutputColumns(node, scanNode, predicates, referenceTable);
scanNode.setIsSortedByKeyPerTablet(node.needSortedByKeyPerTablet());
scanNode.setIsPreAggregation(node.isPreAggregation(), node.getTurnOffReason());
scanNode.setDictStringIdToIntIds(node.getDictStringIdToIntIds());
scanNode.updateAppliedDictStringColumns(node.getGlobalDicts().stream().
map(entry -> entry.first).collect(Collectors.toSet()));
scanNode.setUsePkIndex(node.isUsePkIndex());
context.getScanNodes().add(scanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.RANDOM);
fragment.setQueryGlobalDicts(node.getGlobalDicts());
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalMetaScan(OptExpression optExpression, ExecPlan context) {
    // Translate a meta scan into a MetaScanNode wrapped in its own RANDOM-partitioned fragment.
    PhysicalMetaScanOperator metaScan = (PhysicalMetaScanOperator) optExpression.getOp();
    context.getDescTbl().addReferencedTable(metaScan.getTable());

    TupleDescriptor tuple = context.getDescTbl().createTupleDescriptor();
    tuple.setTable(metaScan.getTable());

    MetaScanNode metaScanNode =
            new MetaScanNode(context.getNextNodeId(),
                    tuple, (OlapTable) metaScan.getTable(), metaScan.getAggColumnIdToNames());
    metaScanNode.computeRangeLocations();
    metaScanNode.computeStatistics(optExpression.getStatistics());

    // Materialize one slot per referenced column and register its SlotRef so later
    // expression translation can resolve these column refs.
    metaScan.getColRefToColumnMetaMap().forEach((colRef, column) -> {
        SlotDescriptor slot =
                context.getDescTbl().addSlotDescriptor(tuple, new SlotId(colRef.getId()));
        slot.setColumn(column);
        slot.setIsNullable(column.isAllowNull());
        slot.setIsMaterialized(true);
        context.getColRefToExpr().put(colRef, new SlotRef(colRef.getName(), slot));
    });
    tuple.computeMemLayout();

    context.getScanNodes().add(metaScanNode);
    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), metaScanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
// Creates one materialized slot per referenced column of the scan and registers the
// corresponding SlotRef in the plan context so later predicate/projection translation
// can resolve the scan's column refs.
private void prepareContextSlots(PhysicalScanOperator node, ExecPlan context, TupleDescriptor tupleDescriptor) {
    node.getColRefToColumnMetaMap().forEach((colRef, column) -> {
        SlotDescriptor slot =
                context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(colRef.getId()));
        slot.setColumn(column);
        slot.setIsNullable(column.isAllowNull());
        slot.setIsMaterialized(true);
        // Complex-typed columns may have been pruned to a narrower type by the optimizer;
        // prefer the optimizer's type over the catalog column type in that case.
        if (slot.getOriginType().isComplexType()) {
            slot.setOriginType(colRef.getType());
            slot.setType(colRef.getType());
        }
        context.getColRefToExpr().put(colRef, new SlotRef(colRef.toString(), slot));
    });
}
// Translates the three groups of scan predicates (partition, no-eval partition, and
// non-partition conjuncts) from scalar operators into exec Exprs on the scan node.
private void prepareCommonExpr(HDFSScanNodePredicates scanNodePredicates,
                               ScanOperatorPredicates predicates, ExecPlan context) {
    ScalarOperatorToExpr.FormatterContext fmt =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());

    for (ScalarOperator conjunct : predicates.getPartitionConjuncts()) {
        scanNodePredicates.getPartitionConjuncts()
                .add(ScalarOperatorToExpr.buildExecExpression(conjunct, fmt));
    }
    for (ScalarOperator conjunct : predicates.getNoEvalPartitionConjuncts()) {
        scanNodePredicates.getNoEvalPartitionConjuncts()
                .add(ScalarOperatorToExpr.buildExecExpression(conjunct, fmt));
    }
    for (ScalarOperator conjunct : predicates.getNonPartitionConjuncts()) {
        scanNodePredicates.getNonPartitionConjuncts()
                .add(ScalarOperatorToExpr.buildExecExpression(conjunct, fmt));
    }
}
// Builds a dedicated 'minMaxTuple' whose slots carry per-file statistics values, then
// translates the min/max conjuncts against that tuple so they can be evaluated against
// file statistics.
private void prepareMinMaxExpr(HDFSScanNodePredicates scanNodePredicates,
                               ScanOperatorPredicates predicates, ExecPlan context) {
    List<ScalarOperator> minMaxConjuncts = predicates.getMinMaxConjuncts();

    // One slot per distinct column ref appearing in the min/max conjuncts.
    TupleDescriptor minMaxTuple = context.getDescTbl().createTupleDescriptor();
    for (ScalarOperator conjunct : minMaxConjuncts) {
        for (ColumnRefOperator ref : Utils.extractColumnRef(conjunct)) {
            SlotDescriptor slot =
                    context.getDescTbl()
                            .addSlotDescriptor(minMaxTuple, new SlotId(ref.getId()));
            Column column = predicates.getMinMaxColumnRefMap().get(ref);
            slot.setColumn(column);
            slot.setIsNullable(column.isAllowNull());
            slot.setIsMaterialized(true);
            context.getColRefToExpr()
                    .put(ref, new SlotRef(ref.toString(), slot));
        }
    }
    minMaxTuple.computeMemLayout();
    scanNodePredicates.setMinMaxTuple(minMaxTuple);

    // Translate the conjuncts only after all slots above are registered.
    ScalarOperatorToExpr.FormatterContext fmt =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
    for (ScalarOperator conjunct : minMaxConjuncts) {
        scanNodePredicates.getMinMaxConjuncts()
                .add(ScalarOperatorToExpr.buildExecExpression(conjunct, fmt));
    }
}
@Override
public PlanFragment visitPhysicalHudiScan(OptExpression optExpression, ExecPlan context) {
    // Translate a Hudi table scan into a HudiScanNode wrapped in its own fragment.
    PhysicalHudiScanOperator node = (PhysicalHudiScanOperator) optExpression.getOp();
    ScanOperatorPredicates predicates = node.getScanOperatorPredicates();

    Table referenceTable = node.getTable();
    context.getDescTbl().addReferencedTable(referenceTable);
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    tupleDescriptor.setTable(referenceTable);

    // Materialize slots for all referenced columns and register their SlotRefs.
    prepareContextSlots(node, context, tupleDescriptor);

    HudiScanNode hudiScanNode =
            new HudiScanNode(context.getNextNodeId(), tupleDescriptor, "HudiScanNode");
    hudiScanNode.computeStatistics(optExpression.getStatistics());
    try {
        HDFSScanNodePredicates scanNodePredicates = hudiScanNode.getScanNodePredicates();
        scanNodePredicates.setSelectedPartitionIds(predicates.getSelectedPartitionIds());
        scanNodePredicates.setIdToPartitionKey(predicates.getIdToPartitionKey());

        hudiScanNode.setupScanRangeLocations(context.getDescTbl());

        prepareCommonExpr(scanNodePredicates, predicates, context);
        prepareMinMaxExpr(scanNodePredicates, predicates, context);
    } catch (Exception e) {
        // Pass the exception as the throwable argument so the full stack trace is logged;
        // the previous string concatenation logged only e.toString() and the separate
        // LOG.warn(e) duplicated the entry.
        LOG.warn("Hudi scan node get scan range locations failed : ", e);
        throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
    }

    hudiScanNode.setLimit(node.getLimit());
    tupleDescriptor.computeMemLayout();
    context.getScanNodes().add(hudiScanNode);

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), hudiScanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
@Override
public PlanFragment visitPhysicalHiveScan(OptExpression optExpression, ExecPlan context) {
    // Translate a Hive table scan into an HdfsScanNode wrapped in its own fragment.
    PhysicalHiveScanOperator node = (PhysicalHiveScanOperator) optExpression.getOp();
    ScanOperatorPredicates predicates = node.getScanOperatorPredicates();

    Table referenceTable = node.getTable();
    context.getDescTbl().addReferencedTable(referenceTable);
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    tupleDescriptor.setTable(referenceTable);

    // Materialize slots for all referenced columns and register their SlotRefs.
    prepareContextSlots(node, context, tupleDescriptor);

    HdfsScanNode hdfsScanNode =
            new HdfsScanNode(context.getNextNodeId(), tupleDescriptor, "HdfsScanNode");
    hdfsScanNode.computeStatistics(optExpression.getStatistics());
    try {
        HDFSScanNodePredicates scanNodePredicates = hdfsScanNode.getScanNodePredicates();
        scanNodePredicates.setSelectedPartitionIds(predicates.getSelectedPartitionIds());
        scanNodePredicates.setIdToPartitionKey(predicates.getIdToPartitionKey());

        hdfsScanNode.setupScanRangeLocations(context.getDescTbl());

        prepareCommonExpr(scanNodePredicates, predicates, context);
        prepareMinMaxExpr(scanNodePredicates, predicates, context);
    } catch (Exception e) {
        // Pass the exception as the throwable argument so the full stack trace is logged;
        // string concatenation ("... " + e) only logged e.toString().
        LOG.warn("Hdfs scan node get scan range locations failed : ", e);
        throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
    }

    hdfsScanNode.setLimit(node.getLimit());
    tupleDescriptor.computeMemLayout();
    context.getScanNodes().add(hdfsScanNode);

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), hdfsScanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
@Override
public PlanFragment visitPhysicalFileScan(OptExpression optExpression, ExecPlan context) {
    // Translate an external file-table scan into a FileTableScanNode wrapped in its own fragment.
    PhysicalFileScanOperator node = (PhysicalFileScanOperator) optExpression.getOp();
    ScanOperatorPredicates predicates = node.getScanOperatorPredicates();

    Table referenceTable = node.getTable();
    context.getDescTbl().addReferencedTable(referenceTable);
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    tupleDescriptor.setTable(referenceTable);

    // Materialize slots for all referenced columns and register their SlotRefs.
    prepareContextSlots(node, context, tupleDescriptor);

    FileTableScanNode fileTableScanNode =
            new FileTableScanNode(context.getNextNodeId(), tupleDescriptor, "FileTableScanNode");
    fileTableScanNode.computeStatistics(optExpression.getStatistics());
    try {
        HDFSScanNodePredicates scanNodePredicates = fileTableScanNode.getScanNodePredicates();

        fileTableScanNode.setupScanRangeLocations();

        prepareCommonExpr(scanNodePredicates, predicates, context);
        prepareMinMaxExpr(scanNodePredicates, predicates, context);
    } catch (Exception e) {
        // Message fixed: this is the file-table scan, not the HDFS scan (copy-paste slip).
        LOG.warn("File table scan node get scan range locations failed : ", e);
        throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
    }

    fileTableScanNode.setLimit(node.getLimit());
    tupleDescriptor.computeMemLayout();
    context.getScanNodes().add(fileTableScanNode);

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), fileTableScanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
@Override
public PlanFragment visitPhysicalDeltaLakeScan(OptExpression optExpression, ExecPlan context) {
    // Translate a Delta Lake table scan into a DeltaLakeScanNode wrapped in its own fragment.
    PhysicalDeltaLakeScanOperator node = (PhysicalDeltaLakeScanOperator) optExpression.getOp();
    Table referenceTable = node.getTable();
    context.getDescTbl().addReferencedTable(referenceTable);
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    tupleDescriptor.setTable(referenceTable);

    // Materialize one slot per referenced column and register its SlotRef for the
    // predicate translation below. (Unlike the other HDFS-backed scans, this visitor
    // does not adjust complex types, so it does not use prepareContextSlots.)
    for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
        SlotDescriptor slotDescriptor =
                context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
        slotDescriptor.setColumn(entry.getValue());
        slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
        slotDescriptor.setIsMaterialized(true);
        context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
    }

    DeltaLakeScanNode deltaLakeScanNode =
            new DeltaLakeScanNode(context.getNextNodeId(), tupleDescriptor, "DeltaLakeScanNode");
    deltaLakeScanNode.computeStatistics(optExpression.getStatistics());
    try {
        // Push all conjuncts down to the scan node.
        ScalarOperatorToExpr.FormatterContext formatterContext =
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
        List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
        for (ScalarOperator predicate : predicates) {
            deltaLakeScanNode.getConjuncts()
                    .add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
        }
        deltaLakeScanNode.setupScanRangeLocations(context.getDescTbl());
        HDFSScanNodePredicates scanNodePredicates = deltaLakeScanNode.getScanNodePredicates();
        prepareMinMaxExpr(scanNodePredicates, node.getScanOperatorPredicates(), context);
    } catch (AnalysisException e) {
        // Pass the exception as the throwable argument so the full stack trace is logged;
        // string concatenation ("... " + e) only logged e.toString().
        LOG.warn("Delta lake scan node get scan range locations failed : ", e);
        throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
    }

    deltaLakeScanNode.setLimit(node.getLimit());
    tupleDescriptor.computeMemLayout();
    context.getScanNodes().add(deltaLakeScanNode);

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), deltaLakeScanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
@Override
// @Override added for consistency with every sibling visit method — TODO confirm the base
// visitor declares visitPhysicalPaimonScan.
public PlanFragment visitPhysicalPaimonScan(OptExpression optExpression, ExecPlan context) {
    // Translate a Paimon table scan into a PaimonScanNode wrapped in its own fragment.
    PhysicalPaimonScanOperator node = (PhysicalPaimonScanOperator) optExpression.getOp();
    ScanOperatorPredicates predicates = node.getScanOperatorPredicates();

    Table referenceTable = node.getTable();
    context.getDescTbl().addReferencedTable(referenceTable);
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    tupleDescriptor.setTable(referenceTable);

    // Materialize slots for all referenced columns and register their SlotRefs.
    prepareContextSlots(node, context, tupleDescriptor);

    PaimonScanNode paimonScanNode =
            new PaimonScanNode(context.getNextNodeId(), tupleDescriptor, "PaimonScanNode");
    paimonScanNode.computeStatistics(optExpression.getStatistics());
    try {
        HDFSScanNodePredicates scanNodePredicates = paimonScanNode.getScanNodePredicates();
        scanNodePredicates.setSelectedPartitionIds(predicates.getSelectedPartitionIds());
        scanNodePredicates.setIdToPartitionKey(predicates.getIdToPartitionKey());

        paimonScanNode.setupScanRangeLocations(context.getDescTbl());

        prepareCommonExpr(scanNodePredicates, predicates, context);
        prepareMinMaxExpr(scanNodePredicates, predicates, context);
    } catch (Exception e) {
        // Pass the exception as the throwable argument so the full stack trace is logged;
        // string concatenation ("... " + e) only logged e.toString().
        LOG.warn("Paimon scan node get scan range locations failed : ", e);
        throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
    }

    paimonScanNode.setLimit(node.getLimit());
    tupleDescriptor.computeMemLayout();
    context.getScanNodes().add(paimonScanNode);

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), paimonScanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
@Override
public PlanFragment visitPhysicalIcebergScan(OptExpression optExpression, ExecPlan context) {
    // Translate an Iceberg table scan into an IcebergScanNode wrapped in its own fragment.
    PhysicalIcebergScanOperator node = (PhysicalIcebergScanOperator) optExpression.getOp();
    Table referenceTable = node.getTable();
    context.getDescTbl().addReferencedTable(referenceTable);
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    tupleDescriptor.setTable(referenceTable);

    // Materialize slots for all referenced columns and register their SlotRefs.
    prepareContextSlots(node, context, tupleDescriptor);

    IcebergScanNode icebergScanNode =
            new IcebergScanNode(context.getNextNodeId(), tupleDescriptor, "IcebergScanNode");
    icebergScanNode.computeStatistics(optExpression.getStatistics());
    try {
        // Push all conjuncts down to the scan node.
        ScalarOperatorToExpr.FormatterContext formatterContext =
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
        List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
        for (ScalarOperator predicate : predicates) {
            icebergScanNode.getConjuncts()
                    .add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
        }
        // The Iceberg predicate must be preprocessed before planning scan ranges.
        icebergScanNode.preProcessIcebergPredicate(node.getPredicate());
        icebergScanNode.setupScanRangeLocations();
        icebergScanNode.appendEqualityColumns(node, columnRefFactory, context);

        HDFSScanNodePredicates scanNodePredicates = icebergScanNode.getScanNodePredicates();
        prepareMinMaxExpr(scanNodePredicates, node.getScanOperatorPredicates(), context);
    } catch (UserException e) {
        // Pass the exception as the throwable argument so the full stack trace is logged;
        // string concatenation ("... " + e) only logged e.toString().
        LOG.warn("Iceberg scan node get scan range locations failed : ", e);
        throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
    }

    icebergScanNode.setLimit(node.getLimit());
    tupleDescriptor.computeMemLayout();
    context.getScanNodes().add(icebergScanNode);

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), icebergScanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
@Override
// Translates an information_schema / system-table scan into a SchemaScanNode. Simple
// "col = const" (and for logs, LIKE/REGEXP and TIMESTAMP range) predicates are additionally
// pushed into dedicated scan-node fields so the backend can filter at the source.
public PlanFragment visitPhysicalSchemaScan(OptExpression optExpression, ExecPlan context) {
PhysicalSchemaScanOperator node = (PhysicalSchemaScanOperator) optExpression.getOp();
context.getDescTbl().addReferencedTable(node.getTable());
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(node.getTable());
// Materialize one slot per referenced column and register its SlotRef.
for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setColumn(entry.getValue());
slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
slotDescriptor.setIsMaterialized(true);
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
tupleDescriptor.computeMemLayout();
SchemaScanNode scanNode = new SchemaScanNode(context.getNextNodeId(), tupleDescriptor);
scanNode.setFrontendIP(FrontendOptions.getLocalHostAddress());
scanNode.setFrontendPort(Config.rpc_port);
scanNode.setUser(context.getConnectContext().getQualifiedUser());
scanNode.setUserIp(context.getConnectContext().getRemoteIP());
scanNode.setLimit(node.getLimit());
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator predicate : predicates) {
// Every conjunct is always added to the scan node; the code below additionally mirrors
// recognizable "columnRef <op> constant" conjuncts into scan-node fields.
scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
if (!(predicate.getChildren().size() == 2 &&
predicate.getChildren().get(0) instanceof ColumnRefOperator &&
predicate.getChildren().get(1) instanceof ConstantOperator)) {
continue;
}
ColumnRefOperator columnRefOperator = (ColumnRefOperator) predicate.getChildren().get(0);
ConstantOperator constantOperator = (ConstantOperator) predicate.getChildren().get(1);
if (predicate instanceof BinaryPredicateOperator) {
BinaryPredicateOperator binaryPredicateOperator = (BinaryPredicateOperator) predicate;
if (binaryPredicateOperator.getBinaryType() == BinaryType.EQ) {
// Equality pushdowns, dispatched by column name.
switch (columnRefOperator.getName()) {
case "TABLE_SCHEMA":
case "DATABASE_NAME":
scanNode.setSchemaDb(constantOperator.getVarchar());
break;
case "TABLE_NAME":
scanNode.setSchemaTable(constantOperator.getVarchar());
break;
case "BE_ID":
scanNode.setBeId(constantOperator.getBigint());
break;
case "TABLE_ID":
scanNode.setTableId(constantOperator.getBigint());
break;
case "PARTITION_ID":
scanNode.setPartitionId(constantOperator.getBigint());
break;
case "TABLET_ID":
scanNode.setTabletId(constantOperator.getBigint());
break;
case "TXN_ID":
scanNode.setTxnId(constantOperator.getBigint());
break;
case "LABEL":
scanNode.setLabel(constantOperator.getVarchar());
break;
case "JOB_ID":
scanNode.setJobId(constantOperator.getBigint());
break;
case "TYPE":
scanNode.setType(constantOperator.getVarchar());
break;
case "STATE":
scanNode.setState(constantOperator.getVarchar());
break;
case "LOG":
// LOG = 'x' becomes an exact-match regex.
scanNode.setLogPattern("^" + constantOperator.getVarchar() + "$");
break;
case "LEVEL":
scanNode.setLogLevel(constantOperator.getVarchar());
break;
default:
break;
}
}
// TIMESTAMP comparisons become a half-open [startTs, endTs) range; +1 converts
// EQ / GT / LE into the equivalent inclusive-start / exclusive-end bounds.
if (columnRefOperator.getName().equals("TIMESTAMP")) {
BinaryType opType = binaryPredicateOperator.getBinaryType();
if (opType == BinaryType.EQ) {
scanNode.setLogStartTs(constantOperator.getBigint());
scanNode.setLogEndTs(constantOperator.getBigint() + 1);
} else if (opType == BinaryType.GT) {
scanNode.setLogStartTs(constantOperator.getBigint() + 1);
} else if (opType == BinaryType.GE) {
scanNode.setLogStartTs(constantOperator.getBigint());
} else if (opType == BinaryType.LT) {
scanNode.setLogEndTs(constantOperator.getBigint());
} else if (opType == BinaryType.LE) {
scanNode.setLogEndTs(constantOperator.getBigint() + 1);
}
}
} else if (predicate instanceof LikePredicateOperator) {
LikePredicateOperator like = (LikePredicateOperator) predicate;
// Only regexp-style matching can be pushed down for log grepping; SQL LIKE is rejected.
if (columnRefOperator.getName().equals("LOG")) {
if (like.getLikeType() == LikePredicateOperator.LikeType.REGEXP) {
scanNode.setLogPattern(((ConstantOperator) like.getChildren().get(1)).getVarchar());
} else {
throw UnsupportedException.unsupportedException(
"only support `regexp` or `rlike` for log grep");
}
}
}
}
// load_tracking_logs is unbounded without a label/job filter, so one is required.
if (scanNode.getTableName().equalsIgnoreCase("load_tracking_logs") && scanNode.getLabel() == null
&& scanNode.getJobId() == null) {
throw UnsupportedException.unsupportedException("load_tracking_logs must specify label or job_id");
}
if (scanNode.isBeSchemaTable()) {
scanNode.computeBeScanRanges();
}
// Per-node grep limit: only safe to use the query limit directly when no predicates filter rows.
if (scanNode.getLimit() > 0 && predicates.isEmpty()) {
scanNode.setLogLimit(Math.min(scanNode.getLimit(), Config.max_per_node_grep_log_limit));
} else {
scanNode.setLogLimit(Config.max_per_node_grep_log_limit);
}
context.getScanNodes().add(scanNode);
// BE-backed schema tables scan every backend (RANDOM); FE-backed ones run unpartitioned.
PlanFragment fragment = new PlanFragment(context.getNextFragmentId(), scanNode,
scanNode.isBeSchemaTable() ? DataPartition.RANDOM : DataPartition.UNPARTITIONED);
context.getFragments().add(fragment);
return fragment;
}
@Override
public PlanFragment visitPhysicalMysqlScan(OptExpression optExpression, ExecPlan context) {
    // Translate an external MySQL table scan into a MysqlScanNode in an unpartitioned fragment.
    PhysicalMysqlScanOperator node = (PhysicalMysqlScanOperator) optExpression.getOp();
    context.getDescTbl().addReferencedTable(node.getTable());

    TupleDescriptor tuple = context.getDescTbl().createTupleDescriptor();
    tuple.setTable(node.getTable());
    // Materialize one slot per referenced column and register its SlotRef.
    node.getColRefToColumnMetaMap().forEach((colRef, column) -> {
        SlotDescriptor slot =
                context.getDescTbl().addSlotDescriptor(tuple, new SlotId(colRef.getId()));
        slot.setColumn(column);
        slot.setIsNullable(column.isAllowNull());
        slot.setIsMaterialized(true);
        context.getColRefToExpr().put(colRef, new SlotRef(colRef.getName(), slot));
    });
    tuple.computeMemLayout();

    MysqlScanNode scanNode = new MysqlScanNode(context.getNextNodeId(), tuple,
            (MysqlTable) node.getTable());
    if (node.getTemporalClause() != null) {
        scanNode.setTemporalClause(node.getTemporalClause());
    }

    // Push all conjuncts down to the scan node.
    ScalarOperatorToExpr.FormatterContext fmt =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
    for (ScalarOperator predicate : Utils.extractConjuncts(node.getPredicate())) {
        scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, fmt));
    }

    scanNode.setLimit(node.getLimit());
    scanNode.computeColumnsAndFilters();
    scanNode.computeStatistics(optExpression.getStatistics());
    context.getScanNodes().add(scanNode);

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.UNPARTITIONED);
    context.getFragments().add(fragment);
    return fragment;
}
@Override
public PlanFragment visitPhysicalEsScan(OptExpression optExpression, ExecPlan context) {
    // Translate an Elasticsearch table scan into an EsScanNode in a RANDOM-partitioned fragment.
    PhysicalEsScanOperator node = (PhysicalEsScanOperator) optExpression.getOp();
    context.getDescTbl().addReferencedTable(node.getTable());

    TupleDescriptor tuple = context.getDescTbl().createTupleDescriptor();
    tuple.setTable(node.getTable());
    // Materialize one slot per referenced column and register its SlotRef.
    node.getColRefToColumnMetaMap().forEach((colRef, column) -> {
        SlotDescriptor slot =
                context.getDescTbl().addSlotDescriptor(tuple, new SlotId(colRef.getId()));
        slot.setColumn(column);
        slot.setIsNullable(column.isAllowNull());
        slot.setIsMaterialized(true);
        context.getColRefToExpr().put(colRef, new SlotRef(colRef.toString(), slot));
    });
    tuple.computeMemLayout();

    EsScanNode scanNode = new EsScanNode(context.getNextNodeId(), tuple, "EsScanNode");

    // Push all conjuncts down to the scan node.
    ScalarOperatorToExpr.FormatterContext fmt =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
    for (ScalarOperator predicate : Utils.extractConjuncts(node.getPredicate())) {
        scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, fmt));
    }

    scanNode.setLimit(node.getLimit());
    scanNode.computeStatistics(optExpression.getStatistics());
    try {
        scanNode.assignBackends();
    } catch (UserException e) {
        throw new StarRocksPlannerException(e.getMessage(), INTERNAL_ERROR);
    }
    scanNode.setShardScanRanges(scanNode.computeShardLocations(node.getSelectedIndex()));

    context.getScanNodes().add(scanNode);
    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.RANDOM);
    context.getFragments().add(fragment);
    return fragment;
}
@Override
public PlanFragment visitPhysicalJDBCScan(OptExpression optExpression, ExecPlan context) {
    // Translate an external JDBC table scan into a JDBCScanNode in an unpartitioned fragment.
    PhysicalJDBCScanOperator node = (PhysicalJDBCScanOperator) optExpression.getOp();
    context.getDescTbl().addReferencedTable(node.getTable());

    TupleDescriptor tuple = context.getDescTbl().createTupleDescriptor();
    tuple.setTable(node.getTable());
    // Materialize one slot per referenced column and register its SlotRef.
    node.getColRefToColumnMetaMap().forEach((colRef, column) -> {
        SlotDescriptor slot =
                context.getDescTbl().addSlotDescriptor(tuple, new SlotId(colRef.getId()));
        slot.setColumn(column);
        slot.setIsNullable(column.isAllowNull());
        slot.setIsMaterialized(true);
        context.getColRefToExpr().put(colRef, new SlotRef(colRef.getName(), slot));
    });
    tuple.computeMemLayout();

    JDBCScanNode scanNode = new JDBCScanNode(context.getNextNodeId(), tuple,
            (JDBCTable) node.getTable());

    // Push all conjuncts down to the scan node.
    ScalarOperatorToExpr.FormatterContext fmt =
            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
    for (ScalarOperator predicate : Utils.extractConjuncts(node.getPredicate())) {
        scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, fmt));
    }

    scanNode.setLimit(node.getLimit());
    scanNode.computeColumnsAndFilters();
    scanNode.computeStatistics(optExpression.getStatistics());
    context.getScanNodes().add(scanNode);

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.UNPARTITIONED);
    context.getFragments().add(fragment);
    return fragment;
}
@Override
public PlanFragment visitPhysicalValues(OptExpression optExpr, ExecPlan context) {
    // Translate a VALUES operator into either an EmptySetNode (no rows) or a
    // UnionNode that carries constant expression rows, in an unpartitioned fragment.
    PhysicalValuesOperator valuesOperator = (PhysicalValuesOperator) optExpr.getOp();

    // Create the output tuple and one slot per output column.
    TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
    for (ColumnRefOperator columnRef : valuesOperator.getColumnRefSet()) {
        SlotDescriptor slot =
                context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(columnRef.getId()));
        slot.setIsNullable(columnRef.isNullable());
        slot.setIsMaterialized(true);
        slot.setType(columnRef.getType());
        context.getColRefToExpr().put(columnRef, new SlotRef(columnRef.toString(), slot));
    }
    tupleDescriptor.computeMemLayout();

    PlanNode root;
    if (valuesOperator.getRows().isEmpty()) {
        EmptySetNode emptyNode = new EmptySetNode(context.getNextNodeId(),
                Lists.newArrayList(tupleDescriptor.getId()));
        emptyNode.computeStatistics(optExpr.getStatistics());
        root = emptyNode;
    } else {
        UnionNode unionNode = new UnionNode(context.getNextNodeId(), tupleDescriptor.getId());
        unionNode.setLimit(valuesOperator.getLimit());

        // Lower every constant row to executable expressions.
        List<List<Expr>> consts = new ArrayList<>();
        for (List<ScalarOperator> row : valuesOperator.getRows()) {
            List<Expr> exprRow = new ArrayList<>();
            for (ScalarOperator field : row) {
                exprRow.add(ScalarOperatorToExpr.buildExecExpression(
                        field, new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())));
            }
            consts.add(exprRow);
        }
        unionNode.setMaterializedConstExprLists_(consts);
        unionNode.computeStatistics(optExpr.getStatistics());
        // Kept only so the constant rows show up in EXPLAIN output; the vectorized
        // engine itself consumes the materialized lists set above.
        consts.forEach(unionNode::addConstExprList);
        root = unionNode;
    }

    PlanFragment fragment = new PlanFragment(context.getNextFragmentId(), root,
            DataPartition.UNPARTITIONED);
    context.getFragments().add(fragment);
    return fragment;
}
/**
 * Returns true iff no node in the plan tree rooted at {@code root} is an ExchangeNode.
 */
public static boolean hasNoExchangeNodes(PlanNode root) {
    if (root instanceof ExchangeNode) {
        return false;
    }
    // The tree is exchange-free iff every subtree is exchange-free.
    return root.getChildren().stream().allMatch(child -> hasNoExchangeNodes(child));
}
/**
 * Whether every node of the plan tree is an instance of one of the given node types.
 *
 * @param root The plan tree root.
 * @param requiredNodeTypes The allowed node types.
 * @return true if all the nodes belong to the node types, otherwise false.
 */
private boolean onlyContainNodeTypes(PlanNode root, List<Class<? extends PlanNode>> requiredNodeTypes) {
    if (requiredNodeTypes.stream().noneMatch(type -> type.isInstance(root))) {
        return false;
    }
    // The root matched; every subtree must match as well.
    return root.getChildren().stream()
            .allMatch(child -> onlyContainNodeTypes(child, requiredNodeTypes));
}
/**
 * Remove ExchangeNode between AggNode and ScanNode for the single backend.
 * <p>
 * This is used to generate "ScanNode->LocalShuffle->OnePhaseLocalAgg" for the single backend,
 * which contains two steps:
 * 1. Ignore the network cost for ExchangeNode when estimating cost model.
 * 2. Remove ExchangeNode between AggNode and ScanNode when building fragments.
 * <p>
 * Specifically, transfer
 * (AggNode->ExchangeNode)->([ProjectNode->]ScanNode)
 * - *inputFragment sourceFragment
 * to
 * (AggNode->[ProjectNode->]ScanNode)
 * - *sourceFragment
 * That is, when matching this fragment pattern, remove inputFragment and return sourceFragment.
 *
 * @param inputFragment The input fragment to match the above pattern.
 * @param context The context of building fragment, which contains all the fragments.
 * @return SourceFragment if it matches the pattern, otherwise the original inputFragment.
 */
private PlanFragment removeExchangeNodeForLocalShuffleAgg(PlanFragment inputFragment, ExecPlan context) {
    if (ConnectContext.get() == null) {
        return inputFragment;
    }
    if (!canUseLocalShuffleAgg) {
        return inputFragment;
    }
    // The optimization requires the pipeline engine with local-shuffle agg enabled,
    // and a deployment with a single backend/compute node.
    SessionVariable sessionVariable = ConnectContext.get().getSessionVariable();
    boolean enableLocalShuffleAgg = sessionVariable.isEnableLocalShuffleAgg()
            && sessionVariable.isEnablePipelineEngine()
            && GlobalStateMgr.getCurrentSystemInfo().isSingleBackendAndComputeNode();
    if (!enableLocalShuffleAgg) {
        return inputFragment;
    }

    // Pattern check: inputFragment must be rooted at an ExchangeNode whose source
    // subtree consists only of scan/project nodes.
    if (!(inputFragment.getPlanRoot() instanceof ExchangeNode)) {
        return inputFragment;
    }
    PlanNode sourceFragmentRoot = inputFragment.getPlanRoot().getChild(0);
    if (!onlyContainNodeTypes(sourceFragmentRoot, ImmutableList.of(ScanNode.class, ProjectNode.class))) {
        return inputFragment;
    }

    // MultiCastPlanFragment has multiple consumers, so its exchange cannot be removed.
    PlanFragment sourceFragment = sourceFragmentRoot.getFragment();
    if (sourceFragment instanceof MultiCastPlanFragment) {
        return inputFragment;
    }

    // Drop inputFragment from the plan; search from the back since it was added last.
    ArrayList<PlanFragment> fragments = context.getFragments();
    for (int i = fragments.size() - 1; i >= 0; --i) {
        if (fragments.get(i).equals(inputFragment)) {
            fragments.remove(i);
            break;
        }
    }

    // The source fragment no longer sends to the removed exchange.
    sourceFragment.clearDestination();
    sourceFragment.clearOutputPartition();
    return sourceFragment;
}
/**
 * Immutable holder for the expression lists produced when lowering an aggregation
 * operator: group-by exprs, aggregate function calls, partition-by exprs, and the
 * intermediate (partial-state) slot refs.
 */
private static class AggregateExprInfo {
    // Group-by expressions of the aggregation.
    public final ArrayList<Expr> groupExpr;
    // Lowered aggregate function calls.
    public final ArrayList<FunctionCallExpr> aggregateExpr;
    // Expressions used to hash-partition the input of the aggregation.
    public final ArrayList<Expr> partitionExpr;
    // SlotRefs typed with each aggregate's intermediate type (partial state).
    public final ArrayList<Expr> intermediateExpr;

    public AggregateExprInfo(ArrayList<Expr> groupExpr, ArrayList<FunctionCallExpr> aggregateExpr,
                             ArrayList<Expr> partitionExpr,
                             ArrayList<Expr> intermediateExpr) {
        this.groupExpr = groupExpr;
        this.aggregateExpr = aggregateExpr;
        this.partitionExpr = partitionExpr;
        this.intermediateExpr = intermediateExpr;
    }
}
/**
 * Lowers the aggregation's group-by / aggregate / partition-by column refs into
 * executable expressions, registering a slot in {@code outputTupleDesc} for each,
 * and returns them bundled in an {@link AggregateExprInfo}.
 */
private AggregateExprInfo buildAggregateTuple(
        Map<ColumnRefOperator, CallOperator> aggregations,
        List<ColumnRefOperator> groupBys,
        List<ColumnRefOperator> partitionBys,
        TupleDescriptor outputTupleDesc,
        ExecPlan context) {
    ArrayList<Expr> groupingExpressions = Lists.newArrayList();
    // Special case: exchange perf statistics functions at agg stage 1 skip
    // materializing the group-by slots entirely.
    boolean forExchangePerf = aggregations.values().stream().anyMatch(aggFunc ->
            aggFunc.getFnName().equals(FunctionSet.EXCHANGE_BYTES) ||
                    aggFunc.getFnName().equals(FunctionSet.EXCHANGE_SPEED)) &&
            ConnectContext.get().getSessionVariable().getNewPlannerAggStage() == 1;
    if (!forExchangePerf) {
        for (ColumnRefOperator grouping : CollectionUtils.emptyIfNull(groupBys)) {
            Expr groupingExpr = ScalarOperatorToExpr.buildExecExpression(grouping,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));
            groupingExpressions.add(groupingExpr);
            // Slot id mirrors the column ref id so later lookups stay consistent.
            SlotDescriptor slotDesc =
                    context.getDescTbl().addSlotDescriptor(outputTupleDesc, new SlotId(grouping.getId()));
            slotDesc.setType(groupingExpr.getType());
            slotDesc.setIsNullable(groupingExpr.isNullable());
            slotDesc.setIsMaterialized(true);
        }
    }

    ArrayList<FunctionCallExpr> aggregateExprList = Lists.newArrayList();
    ArrayList<Expr> intermediateAggrExprs = Lists.newArrayList();
    for (Map.Entry<ColumnRefOperator, CallOperator> aggregation : aggregations.entrySet()) {
        FunctionCallExpr aggExpr = (FunctionCallExpr) ScalarOperatorToExpr.buildExecExpression(
                aggregation.getValue(), new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));
        aggregateExprList.add(aggExpr);
        SlotDescriptor slotDesc = context.getDescTbl()
                .addSlotDescriptor(outputTupleDesc, new SlotId(aggregation.getKey().getId()));
        slotDesc.setType(aggregation.getValue().getType());
        slotDesc.setIsNullable(aggExpr.isNullable());
        slotDesc.setIsMaterialized(true);
        context.getColRefToExpr()
                .put(aggregation.getKey(), new SlotRef(aggregation.getKey().toString(), slotDesc));

        // Build a parallel slot typed with the aggregate's intermediate (partial state)
        // type; falls back to the return type when no intermediate type is declared.
        SlotDescriptor intermediateSlotDesc = new SlotDescriptor(slotDesc.getId(), slotDesc.getParent());
        AggregateFunction aggrFn = (AggregateFunction) aggExpr.getFn();
        Type intermediateType = aggrFn.getIntermediateType() != null ?
                aggrFn.getIntermediateType() : aggrFn.getReturnType();
        intermediateSlotDesc.setType(intermediateType);
        intermediateSlotDesc.setIsNullable(aggrFn.isNullable());
        intermediateSlotDesc.setIsMaterialized(true);
        SlotRef intermediateSlotRef = new SlotRef(aggregation.getKey().toString(), intermediateSlotDesc);
        intermediateAggrExprs.add(intermediateSlotRef);
    }

    ArrayList<Expr> partitionExpressions = Lists.newArrayList();
    for (ColumnRefOperator column : CollectionUtils.emptyIfNull(partitionBys)) {
        Expr partitionExpr = ScalarOperatorToExpr.buildExecExpression(column,
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));

        SlotDescriptor slotDesc =
                context.getDescTbl().addSlotDescriptor(outputTupleDesc, new SlotId(column.getId()));
        slotDesc.setType(partitionExpr.getType());
        slotDesc.setIsNullable(partitionExpr.isNullable());
        slotDesc.setIsMaterialized(true);
        context.getColRefToExpr().put(column, new SlotRef(column.toString(), slotDesc));

        partitionExpressions.add(new SlotRef(slotDesc));
    }

    outputTupleDesc.computeMemLayout();

    return new AggregateExprInfo(groupingExpressions, aggregateExprList, partitionExpressions,
            intermediateAggrExprs);
}
@Override
public PlanFragment visitPhysicalHashAggregate(OptExpression optExpr, ExecPlan context) {
    PhysicalHashAggregateOperator node = (PhysicalHashAggregateOperator) optExpr.getOp();
    PlanFragment originalInputFragment = visit(optExpr.inputAt(0), context);

    // On a single backend the exchange below the aggregation may be replaced by a
    // local shuffle; a changed fragment reference signals that this happened.
    PlanFragment inputFragment = removeExchangeNodeForLocalShuffleAgg(originalInputFragment, context);
    boolean withLocalShuffle = inputFragment != originalInputFragment;

    Map<ColumnRefOperator, CallOperator> aggregations = node.getAggregations();
    List<ColumnRefOperator> groupBys = node.getGroupBys();
    List<ColumnRefOperator> partitionBys = node.getPartitionByColumns();

    TupleDescriptor outputTupleDesc = context.getDescTbl().createTupleDescriptor();
    AggregateExprInfo aggExpr =
            buildAggregateTuple(aggregations, groupBys, partitionBys, outputTupleDesc, context);
    ArrayList<Expr> groupingExpressions = aggExpr.groupExpr;
    ArrayList<FunctionCallExpr> aggregateExprList = aggExpr.aggregateExpr;
    ArrayList<Expr> partitionExpressions = aggExpr.partitionExpr;
    ArrayList<Expr> intermediateAggrExprs = aggExpr.intermediateExpr;

    AggregationNode aggregationNode;
    if (node.getType().isLocal() && node.isSplit()) {
        // Local phase of a split aggregation: produces partial states (FIRST phase),
        // so finalization is deferred to the upper phase.
        AggregateInfo aggInfo = AggregateInfo.create(
                groupingExpressions,
                aggregateExprList,
                outputTupleDesc, outputTupleDesc,
                AggregateInfo.AggPhase.FIRST);
        aggregationNode =
                new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(), aggInfo);
        aggregationNode.unsetNeedsFinalize();
        aggregationNode.setIsPreagg(node.isUseStreamingPreAgg());
        aggregationNode.setIntermediateTuple();
        if (!partitionExpressions.isEmpty()) {
            inputFragment.setOutputPartition(DataPartition.hashPartitioned(partitionExpressions));
        }

        // Colocate aggregation is only valid without local shuffle or streaming pre-agg.
        if (!withLocalShuffle && !node.isUseStreamingPreAgg() &&
                hasColocateOlapScanChildInFragment(aggregationNode)) {
            aggregationNode.setColocate(true);
        }
    } else if (node.getType().isGlobal() || (node.getType().isLocal() && !node.isSplit())) {
        // Global phase (or an unsplit one-phase local aggregation).
        if (node.hasSingleDistinct()) {
            // Merge all aggregates except the single distinct one (SECOND phase).
            for (int i = 0; i < aggregateExprList.size(); i++) {
                if (i != node.getSingleDistinctFunctionPos()) {
                    aggregateExprList.get(i).setMergeAggFn();
                }
            }
            AggregateInfo aggInfo = AggregateInfo.create(
                    groupingExpressions,
                    aggregateExprList,
                    outputTupleDesc, outputTupleDesc,
                    AggregateInfo.AggPhase.SECOND);
            aggregationNode =
                    new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(),
                            aggInfo);
        } else if (!node.isSplit()) {
            // One-phase aggregation: distinct functions are rewritten to their
            // multi_distinct_* counterparts before execution.
            rewriteAggDistinctFirstStageFunction(aggregateExprList);
            AggregateInfo aggInfo = AggregateInfo.create(
                    groupingExpressions,
                    aggregateExprList,
                    outputTupleDesc, outputTupleDesc,
                    AggregateInfo.AggPhase.FIRST);
            aggregationNode =
                    new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(),
                            aggInfo);
        } else {
            // Final merge phase of a split aggregation.
            aggregateExprList.forEach(FunctionCallExpr::setMergeAggFn);
            AggregateInfo aggInfo = AggregateInfo.create(
                    groupingExpressions,
                    aggregateExprList,
                    outputTupleDesc, outputTupleDesc,
                    AggregateInfo.AggPhase.SECOND_MERGE);
            aggregationNode =
                    new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(),
                            aggInfo);
        }

        // HAVING-style predicates are evaluated on the (final) aggregation output.
        List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
        ScalarOperatorToExpr.FormatterContext formatterContext =
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());

        for (ScalarOperator predicate : predicates) {
            aggregationNode.getConjuncts()
                    .add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
        }
        aggregationNode.setLimit(node.getLimit());

        if (!withLocalShuffle && hasColocateOlapScanChildInFragment(aggregationNode)) {
            aggregationNode.setColocate(true);
        }
    } else if (node.getType().isDistinctGlobal()) {
        // Distinct global phase: merge the partial states (FIRST_MERGE), output
        // intermediate tuples for the following phase.
        aggregateExprList.forEach(FunctionCallExpr::setMergeAggFn);
        AggregateInfo aggInfo = AggregateInfo.create(
                groupingExpressions,
                aggregateExprList,
                outputTupleDesc, outputTupleDesc,
                AggregateInfo.AggPhase.FIRST_MERGE);
        aggregationNode =
                new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(), aggInfo);
        aggregationNode.unsetNeedsFinalize();
        aggregationNode.setIntermediateTuple();

        if (!withLocalShuffle && hasColocateOlapScanChildInFragment(aggregationNode)) {
            aggregationNode.setColocate(true);
        }
    } else if (node.getType().isDistinctLocal()) {
        // Distinct local phase: merge everything except the distinct function itself.
        for (int i = 0; i < aggregateExprList.size(); i++) {
            if (i != node.getSingleDistinctFunctionPos()) {
                aggregateExprList.get(i).setMergeAggFn();
            }
        }
        AggregateInfo aggInfo = AggregateInfo.create(
                groupingExpressions,
                aggregateExprList,
                outputTupleDesc, outputTupleDesc,
                AggregateInfo.AggPhase.SECOND);
        aggregationNode =
                new AggregationNode(context.getNextNodeId(), inputFragment.getPlanRoot(), aggInfo);
        aggregationNode.unsetNeedsFinalize();
        aggregationNode.setIsPreagg(node.isUseStreamingPreAgg());
        aggregationNode.setIntermediateTuple();
    } else {
        throw unsupportedException("Not support aggregate type : " + node.getType());
    }

    aggregationNode.setUseSortAgg(node.isUseSortAgg());
    aggregationNode.setStreamingPreaggregationMode(context.getConnectContext().
            getSessionVariable().getStreamingPreaggregationMode());
    aggregationNode.setHasNullableGenerateChild();
    aggregationNode.computeStatistics(optExpr.getStatistics());

    if (node.isOnePhaseAgg() || node.isMergedLocalAgg() || node.getType().isDistinctGlobal()) {
        // With local shuffle, scan ranges must not be pinned per driver sequence.
        inputFragment.setAssignScanRangesPerDriverSeq(!withLocalShuffle);
        aggregationNode.setWithLocalShuffle(withLocalShuffle);
        aggregationNode.setIdenticallyDistributed(true);
    }

    aggregationNode.getAggInfo().setIntermediateAggrExprs(intermediateAggrExprs);
    inputFragment.setPlanRoot(aggregationNode);
    return inputFragment;
}
/**
 * Returns true if this fragment (i.e. the subtree below {@code node}, stopping at
 * exchange boundaries) contains an OLAP scan over a colocated table.
 */
public boolean hasColocateOlapScanChildInFragment(PlanNode node) {
    if (node instanceof OlapScanNode) {
        ColocateTableIndex colocateIndex = GlobalStateMgr.getCurrentColocateIndex();
        OlapScanNode scanNode = (OlapScanNode) node;
        if (colocateIndex.isColocateTable(scanNode.getOlapTable().getId())) {
            return true;
        }
    }
    // An exchange marks the fragment boundary; do not descend past it.
    if (node instanceof ExchangeNode) {
        return false;
    }
    for (PlanNode child : node.getChildren()) {
        // Short-circuit: the original accumulated over all children even after a
        // match was found; returning early gives the same result with less work.
        if (hasColocateOlapScanChildInFragment(child)) {
            return true;
        }
    }
    return false;
}
/**
 * For a one-phase aggregation with exactly one distinct aggregate, rewrites
 * count(distinct x) / sum(distinct x) in place into the corresponding
 * multi_distinct_count / multi_distinct_sum function call.
 */
public void rewriteAggDistinctFirstStageFunction(List<FunctionCallExpr> aggregateExprList) {
    int singleDistinctCount = 0;
    int singleDistinctIndex = 0;
    FunctionCallExpr functionCallExpr = null;
    // Locate the distinct aggregate; only rewrite when there is exactly one.
    for (int i = 0; i < aggregateExprList.size(); ++i) {
        FunctionCallExpr callExpr = aggregateExprList.get(i);
        if (callExpr.isDistinct()) {
            ++singleDistinctCount;
            functionCallExpr = callExpr;
            singleDistinctIndex = i;
        }
    }
    if (singleDistinctCount == 1) {
        FunctionCallExpr replaceExpr = null;
        final String functionName = functionCallExpr.getFnName().getFunction();
        if (functionName.equalsIgnoreCase(FunctionSet.COUNT)) {
            replaceExpr = new FunctionCallExpr(FunctionSet.MULTI_DISTINCT_COUNT, functionCallExpr.getParams());
            replaceExpr.setFn(Expr.getBuiltinFunction(FunctionSet.MULTI_DISTINCT_COUNT,
                    new Type[] {functionCallExpr.getChild(0).getType()},
                    IS_NONSTRICT_SUPERTYPE_OF));
            // The distinct flag is dropped: multi_distinct_* handles dedup internally.
            replaceExpr.getParams().setIsDistinct(false);
        } else if (functionName.equalsIgnoreCase(FunctionSet.SUM)) {
            replaceExpr = new FunctionCallExpr(FunctionSet.MULTI_DISTINCT_SUM, functionCallExpr.getParams());
            Function multiDistinctSum = DecimalV3FunctionAnalyzer.convertSumToMultiDistinctSum(
                    functionCallExpr.getFn(), functionCallExpr.getChild(0).getType());
            replaceExpr.setFn(multiDistinctSum);
            replaceExpr.getParams().setIsDistinct(false);
        }
        // Only COUNT and SUM are expected here; anything else is a planner bug.
        Preconditions.checkState(replaceExpr != null);
        ExpressionAnalyzer.analyzeExpressionIgnoreSlot(replaceExpr, ConnectContext.get());

        aggregateExprList.set(singleDistinctIndex, replaceExpr);
    }
}
@Override
public PlanFragment visitPhysicalDistribution(OptExpression optExpr, ExecPlan context) {
    // Translate a distribution operator into an ExchangeNode in a new fragment;
    // the child fragment becomes the sender.
    PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
    PhysicalDistributionOperator distribution = (PhysicalDistributionOperator) optExpr.getOp();

    DistributionSpec spec = distribution.getDistributionSpec();
    ExchangeNode exchangeNode =
            new ExchangeNode(context.getNextNodeId(), inputFragment.getPlanRoot(), spec.getType());

    DataPartition dataPartition;
    switch (spec.getType()) {
        case GATHER: {
            // Gather everything to a single instance, optionally with a limit.
            exchangeNode.setNumInstances(1);
            dataPartition = DataPartition.UNPARTITIONED;
            GatherDistributionSpec gatherSpec = (GatherDistributionSpec) spec;
            if (gatherSpec.hasLimit()) {
                exchangeNode.setLimit(gatherSpec.getLimit());
            }
            break;
        }
        case BROADCAST:
            exchangeNode.setNumInstances(inputFragment.getPlanRoot().getNumInstances());
            dataPartition = DataPartition.UNPARTITIONED;
            break;
        case SHUFFLE: {
            // Hash-partition on the shuffle columns of the distribution spec.
            exchangeNode.setNumInstances(inputFragment.getPlanRoot().getNumInstances());
            List<Expr> distributeExpressions = new ArrayList<>();
            for (ColumnRefOperator column : getShuffleColumns((HashDistributionSpec) spec)) {
                distributeExpressions.add(ScalarOperatorToExpr.buildExecExpression(column,
                        new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())));
            }
            dataPartition = DataPartition.hashPartitioned(distributeExpressions);
            break;
        }
        default:
            throw new StarRocksPlannerException("Unsupport exchange type : "
                    + spec.getType(), INTERNAL_ERROR);
    }
    exchangeNode.setDataPartition(dataPartition);

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), exchangeNode, dataPartition);
    fragment.setQueryGlobalDicts(distribution.getGlobalDicts());
    inputFragment.setDestination(exchangeNode);
    inputFragment.setOutputPartition(dataPartition);

    context.getFragments().add(fragment);
    return fragment;
}
@Override
public PlanFragment visitPhysicalTopN(OptExpression optExpr, ExecPlan context) {
    PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
    PhysicalTopNOperator topN = (PhysicalTopNOperator) optExpr.getOp();
    Preconditions.checkState(topN.getOffset() >= 0);

    // A split TopN merges pre-sorted partial results; an unsplit one sorts locally.
    if (topN.isSplit()) {
        return buildFinalTopNFragment(context, topN.getTopNType(), topN.getLimit(), topN.getOffset(),
                inputFragment, optExpr);
    }
    return buildPartialTopNFragment(optExpr, context, topN.getPartitionByColumns(),
            topN.getPartitionLimit(), topN.getOrderSpec(),
            topN.getTopNType(), topN.getLimit(), topN.getOffset(), inputFragment);
}
/**
 * Builds the merging phase of a split TopN: gathers the pre-sorted partial results
 * of {@code inputFragment} into a single instance via a merging exchange.
 */
private PlanFragment buildFinalTopNFragment(ExecPlan context, TopNType topNType, long limit, long offset,
                                            PlanFragment inputFragment,
                                            OptExpression optExpr) {
    // The child fragment must be rooted at the partial sort produced earlier.
    Preconditions.checkState(inputFragment.getPlanRoot() instanceof SortNode);
    SortNode sortNode = (SortNode) inputFragment.getPlanRoot();
    sortNode.setTopNType(topNType);

    ExchangeNode exchangeNode = new ExchangeNode(context.getNextNodeId(),
            inputFragment.getPlanRoot(),
            DistributionSpec.DistributionType.GATHER);
    exchangeNode.setNumInstances(1);
    DataPartition dataPartition = DataPartition.UNPARTITIONED;
    exchangeNode.setDataPartition(dataPartition);
    // Merge-sort the incoming streams using the partial sort's ordering.
    exchangeNode.setMergeInfo(sortNode.getSortInfo(), offset);
    exchangeNode.computeStatistics(optExpr.getStatistics());

    // Only ROW_NUMBER-style TopN applies the limit at the exchange; presumably the
    // other TopN types must retain peer rows past the limit.
    if (TopNType.ROW_NUMBER.equals(topNType)) {
        exchangeNode.setLimit(limit);
    } else {
        exchangeNode.unsetLimit();
    }

    PlanFragment fragment =
            new PlanFragment(context.getNextFragmentId(), exchangeNode, dataPartition);
    inputFragment.setDestination(exchangeNode);
    inputFragment.setOutputPartition(dataPartition);
    fragment.setQueryGlobalDicts(inputFragment.getQueryGlobalDicts());

    context.getFragments().add(fragment);
    return fragment;
}
/**
 * Builds the (partial) sort phase of a TopN: creates the sort tuple covering both
 * ordering and pass-through columns, then places a SortNode on top of
 * {@code inputFragment}.
 */
private PlanFragment buildPartialTopNFragment(OptExpression optExpr, ExecPlan context,
                                              List<ColumnRefOperator> partitionByColumns, long partitionLimit,
                                              OrderSpec orderSpec, TopNType topNType, long limit, long offset,
                                              PlanFragment inputFragment) {
    List<Expr> resolvedTupleExprs = Lists.newArrayList();
    List<Expr> partitionExprs = Lists.newArrayList();
    List<Expr> sortExprs = Lists.newArrayList();
    TupleDescriptor sortTuple = context.getDescTbl().createTupleDescriptor();

    // Partition-by expressions (for partition-limited TopN, e.g. window pre-filtering).
    if (CollectionUtils.isNotEmpty(partitionByColumns)) {
        for (ColumnRefOperator partitionByColumn : partitionByColumns) {
            Expr expr = ScalarOperatorToExpr.buildExecExpression(partitionByColumn,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));
            partitionExprs.add(expr);
        }
    }

    // One slot per ordering column; remap the column refs to the sort tuple's slots.
    for (Ordering ordering : orderSpec.getOrderDescs()) {
        Expr sortExpr = ScalarOperatorToExpr.buildExecExpression(ordering.getColumnRef(),
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));

        SlotDescriptor slotDesc =
                context.getDescTbl().addSlotDescriptor(sortTuple, new SlotId(ordering.getColumnRef().getId()));
        slotDesc.initFromExpr(sortExpr);
        slotDesc.setIsMaterialized(true);
        slotDesc.setIsNullable(sortExpr.isNullable());
        slotDesc.setType(sortExpr.getType());

        context.getColRefToExpr()
                .put(ordering.getColumnRef(), new SlotRef(ordering.getColumnRef().toString(), slotDesc));
        resolvedTupleExprs.add(sortExpr);
        sortExprs.add(new SlotRef(slotDesc));
    }

    ColumnRefSet columnRefSet = optExpr.inputAt(0).getLogicalProperty().getOutputColumns();
    for (int i = 0; i < columnRefSet.getColumnIds().length; ++i) {
        /*
         * Add column not be used in ordering
         */
        ColumnRefOperator columnRef = columnRefFactory.getColumnRef(columnRefSet.getColumnIds()[i]);
        if (orderSpec.getOrderDescs().stream().map(Ordering::getColumnRef)
                .noneMatch(c -> c.equals(columnRef))) {
            Expr outputExpr = ScalarOperatorToExpr.buildExecExpression(columnRef,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));

            SlotDescriptor slotDesc =
                    context.getDescTbl().addSlotDescriptor(sortTuple, new SlotId(columnRef.getId()));
            slotDesc.initFromExpr(outputExpr);
            slotDesc.setIsMaterialized(true);
            slotDesc.setIsNullable(outputExpr.isNullable());
            slotDesc.setType(outputExpr.getType());

            context.getColRefToExpr().put(columnRef, new SlotRef(columnRef.toString(), slotDesc));
            resolvedTupleExprs.add(outputExpr);
        }
    }

    sortTuple.computeMemLayout();
    SortInfo sortInfo = new SortInfo(partitionExprs, partitionLimit, sortExprs,
            orderSpec.getOrderDescs().stream().map(Ordering::isAscending).collect(Collectors.toList()),
            orderSpec.getOrderDescs().stream().map(Ordering::isNullsFirst).collect(Collectors.toList()));
    sortInfo.setMaterializedTupleInfo(sortTuple, resolvedTupleExprs);

    // useTopN when a limit exists; do the full sort only for unlimited sorts.
    SortNode sortNode = new SortNode(
            context.getNextNodeId(),
            inputFragment.getPlanRoot(),
            sortInfo,
            limit != Operator.DEFAULT_LIMIT,
            limit == Operator.DEFAULT_LIMIT,
            0);
    sortNode.setTopNType(topNType);
    sortNode.setLimit(limit);
    sortNode.setOffset(offset);
    sortNode.resolvedTupleExprs = resolvedTupleExprs;
    sortNode.setHasNullableGenerateChild();
    sortNode.computeStatistics(optExpr.getStatistics());
    if (shouldBuildGlobalRuntimeFilter()) {
        sortNode.buildRuntimeFilters(runtimeFilterIdIdGenerator, context.getDescTbl());
    }

    inputFragment.setPlanRoot(sortNode);
    return inputFragment;
}
/**
 * Marks the join as push-down capable when the session enables right-table
 * push-down and the join type supports it.
 */
private void setJoinPushDown(JoinNode node) {
    boolean sessionAllows = ConnectContext.get().getSessionVariable().isHashJoinPushDownRightTable();
    JoinOperator joinOp = node.getJoinOp();
    boolean typeAllows = joinOp.isInnerJoin() || joinOp.isLeftSemiJoin() || joinOp.isRightJoin();
    node.setIsPushDown(sessionAllows && typeAllows);
}
/**
 * Global runtime filters are built when a connect context exists and either the
 * global-runtime-filter flag or the pipeline engine is enabled.
 */
private boolean shouldBuildGlobalRuntimeFilter() {
    if (ConnectContext.get() == null) {
        return false;
    }
    SessionVariable sessionVariable = ConnectContext.get().getSessionVariable();
    return sessionVariable.getEnableGlobalRuntimeFilter() || sessionVariable.isEnablePipelineEngine();
}
@Override
public PlanFragment visitPhysicalHashJoin(OptExpression optExpr, ExecPlan context) {
    // Lower both inputs first, then delegate to the shared join translation.
    PlanFragment left = visit(optExpr.inputAt(0), context);
    PlanFragment right = visit(optExpr.inputAt(1), context);
    return visitPhysicalJoin(left, right, optExpr, context);
}
/**
 * Splits the predicate into conjuncts and lowers each to an executable Expr.
 */
private List<Expr> extractConjuncts(ScalarOperator predicate, ExecPlan context) {
    List<Expr> exprs = new ArrayList<>();
    for (ScalarOperator conjunct : Utils.extractConjuncts(predicate)) {
        exprs.add(ScalarOperatorToExpr.buildExecExpression(conjunct,
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())));
    }
    return exprs;
}
/**
 * Marks all slots of the join's non-preserved side(s) as nullable: outer joins
 * can emit null-extended rows for those tuples.
 */
private void setNullableForJoin(JoinOperator joinOperator,
                                PlanFragment leftFragment, PlanFragment rightFragment, ExecPlan context) {
    // Start with tuples that are already nullable on either side.
    Set<TupleId> nullableTupleIds = new HashSet<>(leftFragment.getPlanRoot().getNullableTupleIds());
    nullableTupleIds.addAll(rightFragment.getPlanRoot().getNullableTupleIds());

    // Left outer / full outer: the right side may be null-extended.
    if (joinOperator.isLeftOuterJoin() || joinOperator.isFullOuterJoin()) {
        nullableTupleIds.addAll(rightFragment.getPlanRoot().getTupleIds());
    }
    // Right outer / full outer: the left side may be null-extended.
    if (joinOperator.isRightOuterJoin() || joinOperator.isFullOuterJoin()) {
        nullableTupleIds.addAll(leftFragment.getPlanRoot().getTupleIds());
    }

    for (TupleId tupleId : nullableTupleIds) {
        TupleDescriptor tupleDescriptor = context.getDescTbl().getTupleDesc(tupleId);
        tupleDescriptor.getSlots().forEach(slot -> slot.setIsNullable(true));
        tupleDescriptor.computeMemLayout();
    }
}
@Override
public PlanFragment visitPhysicalNestLoopJoin(OptExpression optExpr, ExecPlan context) {
    PhysicalJoinOperator node = (PhysicalJoinOperator) optExpr.getOp();
    PlanFragment leftFragment = visit(optExpr.inputAt(0), context);
    PlanFragment rightFragment = visit(optExpr.inputAt(1), context);

    // Lower the filter predicate and the ON clause separately.
    List<Expr> conjuncts = extractConjuncts(node.getPredicate(), context);
    List<Expr> joinOnConjuncts = extractConjuncts(node.getOnPredicate(), context);
    List<Expr> probePartitionByExprs = Lists.newArrayList();
    DistributionSpec leftDistributionSpec =
            optExpr.getRequiredProperties().get(0).getDistributionProperty().getSpec();
    DistributionSpec rightDistributionSpec =
            optExpr.getRequiredProperties().get(1).getDistributionProperty().getSpec();
    if (leftDistributionSpec instanceof HashDistributionSpec &&
            rightDistributionSpec instanceof HashDistributionSpec) {
        probePartitionByExprs = getShuffleExprs((HashDistributionSpec) leftDistributionSpec, context);
    }

    setNullableForJoin(node.getJoinType(), leftFragment, rightFragment, context);

    NestLoopJoinNode joinNode = new NestLoopJoinNode(context.getNextNodeId(),
            leftFragment.getPlanRoot(), rightFragment.getPlanRoot(),
            null, node.getJoinType(), Lists.newArrayList(), joinOnConjuncts);

    joinNode.setLimit(node.getLimit());
    joinNode.computeStatistics(optExpr.getStatistics());
    joinNode.addConjuncts(conjuncts);
    joinNode.setProbePartitionByExprs(probePartitionByExprs);

    // Merge the right fragment into the left: the right plan root is re-homed, the
    // right fragment is dropped, and the left fragment is re-appended last.
    rightFragment.getPlanRoot().setFragment(leftFragment);
    context.getFragments().remove(rightFragment);

    context.getFragments().remove(leftFragment);
    context.getFragments().add(leftFragment);

    leftFragment.setPlanRoot(joinNode);
    leftFragment.addChildren(rightFragment.getChildren());

    // A non-exchange build side means the build is executed in-place (replicated).
    if (!(joinNode.getChild(1) instanceof ExchangeNode)) {
        joinNode.setReplicated(true);
    }
    if (shouldBuildGlobalRuntimeFilter()) {
        joinNode.buildRuntimeFilters(runtimeFilterIdIdGenerator, context.getDescTbl());
    }
    leftFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
    return leftFragment;
}
@Override
public PlanFragment visitPhysicalMergeJoin(OptExpression optExpr, ExecPlan context) {
    PlanFragment leftFragment = visit(optExpr.inputAt(0), context);
    PlanFragment rightFragment = visit(optExpr.inputAt(1), context);
    PlanNode leftPlanRoot = leftFragment.getPlanRoot();
    PlanNode rightPlanRoot = rightFragment.getPlanRoot();

    OptExpression leftExpression = optExpr.inputAt(0);
    OptExpression rightExpression = optExpr.inputAt(1);

    // When both inputs have children (presumably sort nodes feeding the merge join),
    // temporarily splice them out so the generic join translation operates on the
    // nodes below the sorts; everything is restored after the call.
    boolean needDealSort = leftExpression.getInputs().size() > 0 && rightExpression.getInputs().size() > 0;
    if (needDealSort) {
        optExpr.setChild(0, leftExpression.inputAt(0));
        optExpr.setChild(1, rightExpression.inputAt(0));
        leftFragment.setPlanRoot(leftPlanRoot.getChild(0));
        rightFragment.setPlanRoot(rightPlanRoot.getChild(0));
    }

    PlanFragment planFragment = visitPhysicalJoin(leftFragment, rightFragment, optExpr, context);
    if (needDealSort) {
        // Restore the original tree shape and re-attach the saved plan roots.
        leftExpression.setChild(0, optExpr.inputAt(0));
        rightExpression.setChild(0, optExpr.inputAt(1));
        optExpr.setChild(0, leftExpression);
        optExpr.setChild(1, rightExpression);
        planFragment.getPlanRoot().setChild(0, leftPlanRoot);
        planFragment.getPlanRoot().setChild(1, rightPlanRoot);
    }
    return planFragment;
}
/**
 * Resolves the distribution spec's shuffle column ids back to column refs.
 */
private List<ColumnRefOperator> getShuffleColumns(HashDistributionSpec spec) {
    List<DistributionCol> distributionCols = spec.getShuffleColumns();
    Preconditions.checkState(!distributionCols.isEmpty());
    return distributionCols.stream()
            .map(col -> columnRefFactory.getColumnRef(col.getColId()))
            .collect(Collectors.toList());
}
/**
 * Lowers the spec's shuffle columns to executable expressions.
 */
private List<Expr> getShuffleExprs(HashDistributionSpec hashDistributionSpec, ExecPlan context) {
    List<Expr> exprs = new ArrayList<>();
    for (ColumnRefOperator column : getShuffleColumns(hashDistributionSpec)) {
        exprs.add(ScalarOperatorToExpr.buildExecExpression(column,
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())));
    }
    return exprs;
}
/**
 * Shared translation for hash and merge joins: builds the join node from the
 * already-lowered child fragments and stitches the fragments together.
 */
private PlanFragment visitPhysicalJoin(PlanFragment leftFragment, PlanFragment rightFragment,
                                       OptExpression optExpr, ExecPlan context) {
    PhysicalJoinOperator node = (PhysicalJoinOperator) optExpr.getOp();
    JoinOperator joinOperator = node.getJoinType();
    Preconditions.checkState(!joinOperator.isCrossJoin(), "should not be cross join");

    // Look through DecodeNodes when inferring the distribution mode.
    PlanNode leftFragmentPlanRoot = leftFragment.getPlanRoot();
    PlanNode rightFragmentPlanRoot = rightFragment.getPlanRoot();
    if (leftFragmentPlanRoot instanceof DecodeNode) {
        leftFragmentPlanRoot = leftFragmentPlanRoot.getChild(0);
    }
    if (rightFragmentPlanRoot instanceof DecodeNode) {
        rightFragmentPlanRoot = rightFragmentPlanRoot.getChild(0);
    }

    // Probe-side partition exprs are only meaningful when both sides are hash-distributed.
    List<Expr> probePartitionByExprs = Lists.newArrayList();
    DistributionSpec leftDistributionSpec =
            optExpr.getRequiredProperties().get(0).getDistributionProperty().getSpec();
    DistributionSpec rightDistributionSpec =
            optExpr.getRequiredProperties().get(1).getDistributionProperty().getSpec();
    if (leftDistributionSpec instanceof HashDistributionSpec &&
            rightDistributionSpec instanceof HashDistributionSpec) {
        probePartitionByExprs = getShuffleExprs((HashDistributionSpec) leftDistributionSpec, context);
    }

    JoinNode.DistributionMode distributionMode =
            inferDistributionMode(optExpr, leftFragmentPlanRoot, rightFragmentPlanRoot);
    JoinExprInfo joinExpr = buildJoinExpr(optExpr, context);
    List<Expr> eqJoinConjuncts = joinExpr.eqJoinConjuncts;
    List<Expr> otherJoinConjuncts = joinExpr.otherJoin;
    List<Expr> conjuncts = joinExpr.conjuncts;

    setNullableForJoin(joinOperator, leftFragment, rightFragment, context);

    JoinNode joinNode;
    if (node instanceof PhysicalHashJoinOperator) {
        joinNode = new HashJoinNode(
                context.getNextNodeId(),
                leftFragment.getPlanRoot(), rightFragment.getPlanRoot(),
                joinOperator, eqJoinConjuncts, otherJoinConjuncts);
    } else if (node instanceof PhysicalMergeJoinOperator) {
        joinNode = new MergeJoinNode(
                context.getNextNodeId(),
                leftFragment.getPlanRoot(), rightFragment.getPlanRoot(),
                joinOperator, eqJoinConjuncts, otherJoinConjuncts);
    } else {
        throw new StarRocksPlannerException("unknown join operator: " + node, INTERNAL_ERROR);
    }

    // Build outputColumns
    fillSlotsInfo(node.getProjection(), joinNode, optExpr, joinExpr.requiredColsForFilter);

    joinNode.setDistributionMode(distributionMode);
    joinNode.getConjuncts().addAll(conjuncts);
    joinNode.setLimit(node.getLimit());
    joinNode.computeStatistics(optExpr.getStatistics());
    joinNode.setProbePartitionByExprs(probePartitionByExprs);

    if (shouldBuildGlobalRuntimeFilter()) {
        joinNode.buildRuntimeFilters(runtimeFilterIdIdGenerator, context.getDescTbl());
    }

    return buildJoinFragment(context, leftFragment, rightFragment, distributionMode, joinNode);
}
// Returns true iff {@code node} is an ExchangeNode whose distribution type equals {@code expectedType}.
// Uses Objects.equals, so a null distribution type on the exchange is handled safely.
private boolean isExchangeWithDistributionType(PlanNode node, DistributionSpec.DistributionType expectedType) {
    if (node instanceof ExchangeNode) {
        ExchangeNode exchange = (ExchangeNode) node;
        return Objects.equals(exchange.getDistributionType(), expectedType);
    }
    return false;
}
// A join can be executed colocate when every required child property is a shuffle
// distribution whose hash source is LOCAL (i.e. data is already bucketed locally).
private boolean isColocateJoin(OptExpression optExpression) {
    return optExpression.getRequiredProperties().stream().allMatch(property -> {
        if (!property.getDistributionProperty().isShuffle()) {
            return false;
        }
        HashDistributionSpec spec =
                (HashDistributionSpec) property.getDistributionProperty().getSpec();
        return HashDistributionDesc.SourceType.LOCAL.equals(
                spec.getHashDistributionDesc().getSourceType());
    });
}
// A join is a shuffle join when every required child property is a shuffle distribution
// whose hash source is one of SHUFFLE_JOIN / SHUFFLE_ENFORCE / SHUFFLE_AGG.
public boolean isShuffleJoin(OptExpression optExpression) {
    return optExpression.getRequiredProperties().stream().allMatch(property -> {
        if (!property.getDistributionProperty().isShuffle()) {
            return false;
        }
        HashDistributionDesc.SourceType sourceType =
                ((HashDistributionSpec) property.getDistributionProperty().getSpec())
                        .getHashDistributionDesc().getSourceType();
        switch (sourceType) {
            case SHUFFLE_JOIN:
            case SHUFFLE_ENFORCE:
            case SHUFFLE_AGG:
                return true;
            default:
                return false;
        }
    });
}
// Merges a bucket-shuffle join into the "stay" fragment: the stay side keeps its data
// in place while the "remove" fragment's child re-partitions its output with
// BUCKET_SHUFFLE_HASH_PARTITIONED on the remove side's partition exprs.
// The remove fragment is then absorbed (children and global dicts) into the stay fragment.
public PlanFragment computeBucketShufflePlanFragment(ExecPlan context,
PlanFragment stayFragment,
PlanFragment removeFragment, JoinNode hashJoinNode) {
hashJoinNode.setLocalHashBucket(true);
hashJoinNode.setPartitionExprs(removeFragment.getDataPartition().getPartitionExprs());
removeFragment.getChild(0)
.setOutputPartition(new DataPartition(TPartitionType.BUCKET_SHUFFLE_HASH_PARTITIONED,
removeFragment.getDataPartition().getPartitionExprs()));
// Drop the remove fragment entirely; remove+re-add moves the stay fragment to the
// end of the plan's fragment list.
context.getFragments().remove(removeFragment);
context.getFragments().remove(stayFragment);
context.getFragments().add(stayFragment);
stayFragment.setPlanRoot(hashJoinNode);
stayFragment.addChildren(removeFragment.getChildren());
stayFragment.mergeQueryGlobalDicts(removeFragment.getQueryGlobalDicts());
return stayFragment;
}
// Merges a shuffle-hash-bucket join into the "stay" fragment: the "remove" fragment's
// child re-partitions its output with plain HASH_PARTITIONED on the remove side's
// partition exprs, then the remove fragment is absorbed into the stay fragment.
// Same shape as computeBucketShufflePlanFragment but without local bucketing.
public PlanFragment computeShuffleHashBucketPlanFragment(ExecPlan context,
PlanFragment stayFragment,
PlanFragment removeFragment,
JoinNode hashJoinNode) {
hashJoinNode.setPartitionExprs(removeFragment.getDataPartition().getPartitionExprs());
DataPartition dataPartition = new DataPartition(TPartitionType.HASH_PARTITIONED,
removeFragment.getDataPartition().getPartitionExprs());
removeFragment.getChild(0).setOutputPartition(dataPartition);
// Drop the remove fragment; remove+re-add moves the stay fragment to the end of the list.
context.getFragments().remove(removeFragment);
context.getFragments().remove(stayFragment);
context.getFragments().add(stayFragment);
stayFragment.setPlanRoot(hashJoinNode);
stayFragment.addChildren(removeFragment.getChildren());
stayFragment.mergeQueryGlobalDicts(removeFragment.getQueryGlobalDicts());
return stayFragment;
}
// Plans an assert-num-rows operator by stacking an AssertNumRowsNode on top of the
// child fragment. All child slots are marked nullable first, since the assertion
// node may produce nulls for the checked rows (see the slot mutation below).
@Override
public PlanFragment visitPhysicalAssertOneRow(OptExpression optExpression, ExecPlan context) {
PlanFragment inputFragment = visit(optExpression.inputAt(0), context);
// Force every slot of the child's tuples to nullable before adding the assert node.
for (TupleId id : inputFragment.getPlanRoot().getTupleIds()) {
context.getDescTbl().getTupleDesc(id).getSlots().forEach(s -> s.setIsNullable(true));
}
PhysicalAssertOneRowOperator assertOneRow = (PhysicalAssertOneRowOperator) optExpression.getOp();
AssertNumRowsNode node =
new AssertNumRowsNode(context.getNextNodeId(), inputFragment.getPlanRoot(),
new AssertNumRowsElement(assertOneRow.getCheckRows(), assertOneRow.getTips(),
assertOneRow.getAssertion()));
node.computeStatistics(optExpression.getStatistics());
inputFragment.setPlanRoot(node);
return inputFragment;
}
// Plans a window (analytic) operator: builds the output tuple for the analytic calls,
// translates partition-by / order-by expressions, wraps the child in an
// AnalyticEvalNode, and attaches any residual predicate as conjuncts on that node.
@Override
public PlanFragment visitPhysicalAnalytic(OptExpression optExpr, ExecPlan context) {
PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
PhysicalWindowOperator node = (PhysicalWindowOperator) optExpr.getOp();
List<Expr> analyticFnCalls = new ArrayList<>();
TupleDescriptor outputTupleDesc = context.getDescTbl().createTupleDescriptor();
// One output slot per analytic call; slot id reuses the column ref id so later
// expressions can resolve the call's result through colRefToExpr.
for (Map.Entry<ColumnRefOperator, CallOperator> analyticCall : node.getAnalyticCall().entrySet()) {
Expr analyticFunction = ScalarOperatorToExpr.buildExecExpression(analyticCall.getValue(),
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr()));
analyticFnCalls.add(analyticFunction);
SlotDescriptor slotDesc = context.getDescTbl()
.addSlotDescriptor(outputTupleDesc, new SlotId(analyticCall.getKey().getId()));
slotDesc.setType(analyticFunction.getType());
slotDesc.setIsNullable(analyticFunction.isNullable());
slotDesc.setIsMaterialized(true);
context.getColRefToExpr()
.put(analyticCall.getKey(), new SlotRef(analyticCall.getKey().toString(), slotDesc));
}
List<Expr> partitionExprs =
node.getPartitionExpressions().stream().map(e -> ScalarOperatorToExpr.buildExecExpression(e,
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
.collect(Collectors.toList());
List<OrderByElement> orderByElements = node.getOrderByElements().stream().map(e -> new OrderByElement(
ScalarOperatorToExpr.buildExecExpression(e.getColumnRef(),
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())),
e.isAscending(), e.isNullsFirst())).collect(Collectors.toList());
AnalyticEvalNode analyticEvalNode = new AnalyticEvalNode(
context.getNextNodeId(),
inputFragment.getPlanRoot(),
analyticFnCalls,
partitionExprs,
orderByElements,
node.getAnalyticWindow(),
node.isUseHashBasedPartition(),
null, outputTupleDesc, null, null,
context.getDescTbl().createTupleDescriptor());
analyticEvalNode.setSubstitutedPartitionExprs(partitionExprs);
analyticEvalNode.setLimit(node.getLimit());
analyticEvalNode.setHasNullableGenerateChild();
analyticEvalNode.computeStatistics(optExpr.getStatistics());
// Colocate execution is possible when some descendant in this fragment is a
// colocated OLAP scan (no exchange in between).
if (hasColocateOlapScanChildInFragment(analyticEvalNode)) {
analyticEvalNode.setColocate(true);
}
// Residual predicate on the window operator is evaluated on the analytic node itself.
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator predicate : predicates) {
analyticEvalNode.getConjuncts()
.add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
// If the child is a sort that was enforced for this window, tell it which
// partition exprs it is sorting for.
PlanNode root = inputFragment.getPlanRoot();
if (root instanceof SortNode) {
SortNode sortNode = (SortNode) root;
sortNode.setAnalyticPartitionExprs(analyticEvalNode.getPartitionExprs());
}
inputFragment.setPlanRoot(analyticEvalNode);
return inputFragment;
}
// Plans a UNION / EXCEPT / INTERSECT operator. Builds the set-operation output tuple,
// creates the matching SetOperationNode subclass, then visits every child and hooks it
// up through an ExchangeNode: children are RANDOM-partitioned for UNION and
// hash-partitioned on their output exprs for EXCEPT/INTERSECT.
// The result lives in a new fragment with RANDOM data partition.
private PlanFragment buildSetOperation(OptExpression optExpr, ExecPlan context, OperatorType operatorType) {
PhysicalSetOperation setOperation = (PhysicalSetOperation) optExpr.getOp();
TupleDescriptor setOperationTuple = context.getDescTbl().createTupleDescriptor();
// One output slot per output column ref; slot id mirrors the column ref id.
for (ColumnRefOperator columnRefOperator : setOperation.getOutputColumnRefOp()) {
SlotDescriptor slotDesc = context.getDescTbl()
.addSlotDescriptor(setOperationTuple, new SlotId(columnRefOperator.getId()));
slotDesc.setType(columnRefOperator.getType());
slotDesc.setIsMaterialized(true);
slotDesc.setIsNullable(columnRefOperator.isNullable());
context.getColRefToExpr().put(columnRefOperator, new SlotRef(columnRefOperator.toString(), slotDesc));
}
SetOperationNode setOperationNode;
boolean isUnion = false;
if (operatorType.equals(OperatorType.PHYSICAL_UNION)) {
isUnion = true;
setOperationNode = new UnionNode(context.getNextNodeId(), setOperationTuple.getId());
// All children are exchange-fed, so every child index is "materialized".
setOperationNode.setFirstMaterializedChildIdx_(optExpr.arity());
} else if (operatorType.equals(OperatorType.PHYSICAL_EXCEPT)) {
setOperationNode = new ExceptNode(context.getNextNodeId(), setOperationTuple.getId());
} else if (operatorType.equals(OperatorType.PHYSICAL_INTERSECT)) {
setOperationNode = new IntersectNode(context.getNextNodeId(), setOperationTuple.getId());
} else {
throw new StarRocksPlannerException("Unsupported set operation", INTERNAL_ERROR);
}
// Per child: map each output slot id to the child's corresponding output slot id.
List<Map<Integer, Integer>> outputSlotIdToChildSlotIdMaps = new ArrayList<>();
for (int childIdx = 0; childIdx < optExpr.arity(); ++childIdx) {
Map<Integer, Integer> slotIdMap = new HashMap<>();
List<ColumnRefOperator> childOutput = setOperation.getChildOutputColumns().get(childIdx);
Preconditions.checkState(childOutput.size() == setOperation.getOutputColumnRefOp().size());
for (int columnIdx = 0; columnIdx < setOperation.getOutputColumnRefOp().size(); ++columnIdx) {
Integer resultColumnIdx = setOperation.getOutputColumnRefOp().get(columnIdx).getId();
slotIdMap.put(resultColumnIdx, childOutput.get(columnIdx).getId());
}
outputSlotIdToChildSlotIdMaps.add(slotIdMap);
Preconditions.checkState(slotIdMap.size() == setOperation.getOutputColumnRefOp().size());
}
setOperationNode.setOutputSlotIdToChildSlotIdMaps(outputSlotIdToChildSlotIdMaps);
Preconditions.checkState(optExpr.getInputs().size() == setOperation.getChildOutputColumns().size());
PlanFragment setOperationFragment =
new PlanFragment(context.getNextFragmentId(), setOperationNode, DataPartition.RANDOM);
List<List<Expr>> materializedResultExprLists = Lists.newArrayList();
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (int i = 0; i < optExpr.getInputs().size(); i++) {
List<ColumnRefOperator> childOutput = setOperation.getChildOutputColumns().get(i);
PlanFragment fragment = visit(optExpr.getInputs().get(i), context);
List<Expr> materializedExpressions = Lists.newArrayList();
for (ColumnRefOperator ref : childOutput) {
materializedExpressions.add(ScalarOperatorToExpr.buildExecExpression(ref, formatterContext));
}
materializedResultExprLists.add(materializedExpressions);
// UNION only concatenates, so RANDOM suffices; EXCEPT/INTERSECT need equal rows
// on the same instance, so hash-partition on the child's output exprs.
if (isUnion) {
fragment.setOutputPartition(DataPartition.RANDOM);
} else {
fragment.setOutputPartition(DataPartition.hashPartitioned(materializedExpressions));
}
ExchangeNode exchangeNode =
new ExchangeNode(context.getNextNodeId(), fragment.getPlanRoot(), fragment.getDataPartition());
exchangeNode.setFragment(setOperationFragment);
fragment.setDestination(exchangeNode);
setOperationNode.addChild(exchangeNode);
}
setOperationNode.setHasNullableGenerateChild();
// Propagate nullability from generated children into the output slots
// (bitwise-or of the two boolean flags).
List<Expr> setOutputList = Lists.newArrayList();
for (ColumnRefOperator columnRefOperator : setOperation.getOutputColumnRefOp()) {
SlotDescriptor slotDesc = context.getDescTbl().getSlotDesc(new SlotId(columnRefOperator.getId()));
slotDesc.setIsNullable(slotDesc.getIsNullable() | setOperationNode.isHasNullableGenerateChild());
setOutputList.add(new SlotRef(String.valueOf(columnRefOperator.getId()), slotDesc));
}
setOperationTuple.computeMemLayout();
setOperationNode.setSetOperationOutputList(setOutputList);
setOperationNode.setMaterializedResultExprLists_(materializedResultExprLists);
setOperationNode.setLimit(setOperation.getLimit());
setOperationNode.computeStatistics(optExpr.getStatistics());
context.getFragments().add(setOperationFragment);
return setOperationFragment;
}
// UNION is planned by the shared set-operation builder.
@Override
public PlanFragment visitPhysicalUnion(OptExpression optExpr, ExecPlan context) {
return buildSetOperation(optExpr, context, OperatorType.PHYSICAL_UNION);
}
// EXCEPT is planned by the shared set-operation builder.
@Override
public PlanFragment visitPhysicalExcept(OptExpression optExpr, ExecPlan context) {
return buildSetOperation(optExpr, context, OperatorType.PHYSICAL_EXCEPT);
}
// INTERSECT is planned by the shared set-operation builder.
@Override
public PlanFragment visitPhysicalIntersect(OptExpression optExpr, ExecPlan context) {
return buildSetOperation(optExpr, context, OperatorType.PHYSICAL_INTERSECT);
}
// Plans a REPEAT operator (GROUPING SETS / ROLLUP / CUBE expansion): builds the
// output grouping tuple, collects the slot-id sets for each repeat list, and wraps
// the child in a RepeatNode with any residual predicate attached as conjuncts.
@Override
public PlanFragment visitPhysicalRepeat(OptExpression optExpr, ExecPlan context) {
PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
PhysicalRepeatOperator repeatOperator = (PhysicalRepeatOperator) optExpr.getOp();
TupleDescriptor outputGroupingTuple = context.getDescTbl().createTupleDescriptor();
// One slot per output grouping column; slot id mirrors the column ref id.
for (ColumnRefOperator columnRefOperator : repeatOperator.getOutputGrouping()) {
SlotDescriptor slotDesc = context.getDescTbl()
.addSlotDescriptor(outputGroupingTuple, new SlotId(columnRefOperator.getId()));
slotDesc.setType(columnRefOperator.getType());
slotDesc.setIsMaterialized(true);
slotDesc.setIsNullable(columnRefOperator.isNullable());
context.getColRefToExpr().put(columnRefOperator, new SlotRef(columnRefOperator.toString(), slotDesc));
}
outputGroupingTuple.computeMemLayout();
// Each repeat list becomes the set of slot ids kept for that grouping combination.
List<Set<Integer>> repeatSlotIdList = new ArrayList<>();
for (List<ColumnRefOperator> repeat : repeatOperator.getRepeatColumnRef()) {
repeatSlotIdList.add(
repeat.stream().map(ColumnRefOperator::getId).collect(Collectors.toSet()));
}
RepeatNode repeatNode = new RepeatNode(
context.getNextNodeId(),
inputFragment.getPlanRoot(),
outputGroupingTuple,
repeatSlotIdList,
repeatOperator.getGroupingIds());
List<ScalarOperator> predicates = Utils.extractConjuncts(repeatOperator.getPredicate());
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator predicate : predicates) {
repeatNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
repeatNode.computeStatistics(optExpr.getStatistics());
inputFragment.setPlanRoot(repeatNode);
return inputFragment;
}
// Plans a standalone filter operator: translates each conjunct of the predicate into
// an executable Expr and stacks a SelectNode (with the operator's limit) on the child.
@Override
public PlanFragment visitPhysicalFilter(OptExpression optExpr, ExecPlan context) {
    PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
    PhysicalFilterOperator filter = (PhysicalFilterOperator) optExpr.getOp();

    // Translate each conjunct separately, with a fresh formatter context per conjunct.
    List<Expr> predicates = new ArrayList<>();
    for (ScalarOperator conjunct : Utils.extractConjuncts(filter.getPredicate())) {
        predicates.add(ScalarOperatorToExpr.buildExecExpression(conjunct,
                new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())));
    }

    SelectNode selectNode =
            new SelectNode(context.getNextNodeId(), inputFragment.getPlanRoot(), predicates);
    selectNode.setLimit(filter.getLimit());
    selectNode.computeStatistics(optExpr.getStatistics());
    inputFragment.setPlanRoot(selectNode);
    return inputFragment;
}
// Plans a table function (UDTF / lateral) operator: builds the output tuple for the
// function's result columns and wraps the child in a TableFunctionNode carrying the
// parameter, outer, and result column-ref ids.
@Override
public PlanFragment visitPhysicalTableFunction(OptExpression optExpression, ExecPlan context) {
PlanFragment inputFragment = visit(optExpression.inputAt(0), context);
PhysicalTableFunctionOperator physicalTableFunction = (PhysicalTableFunctionOperator) optExpression.getOp();
TupleDescriptor udtfOutputTuple = context.getDescTbl().createTupleDescriptor();
// One output slot per output column ref; slot id mirrors the column ref id.
for (ColumnRefOperator columnRefOperator : physicalTableFunction.getOutputColRefs()) {
SlotDescriptor slotDesc =
context.getDescTbl().addSlotDescriptor(udtfOutputTuple, new SlotId(columnRefOperator.getId()));
slotDesc.setType(columnRefOperator.getType());
slotDesc.setIsMaterialized(true);
slotDesc.setIsNullable(columnRefOperator.isNullable());
context.getColRefToExpr().put(columnRefOperator, new SlotRef(columnRefOperator.toString(), slotDesc));
}
udtfOutputTuple.computeMemLayout();
TableFunctionNode tableFunctionNode = new TableFunctionNode(context.getNextNodeId(),
inputFragment.getPlanRoot(),
udtfOutputTuple,
physicalTableFunction.getFn(),
physicalTableFunction.getFnParamColumnRefs().stream().map(ColumnRefOperator::getId)
.collect(Collectors.toList()),
physicalTableFunction.getOuterColRefs().stream().map(ColumnRefOperator::getId)
.collect(Collectors.toList()),
physicalTableFunction.getFnResultColRefs().stream().map(ColumnRefOperator::getId)
.collect(Collectors.toList())
);
tableFunctionNode.computeStatistics(optExpression.getStatistics());
tableFunctionNode.setLimit(physicalTableFunction.getLimit());
inputFragment.setPlanRoot(tableFunctionNode);
return inputFragment;
}
// A physical limit adds no plan node here; the limit has been pushed onto the child,
// so just plan the child.
@Override
public PlanFragment visitPhysicalLimit(OptExpression optExpression, ExecPlan context) {
return visit(optExpression.inputAt(0), context);
}
// Plans a CTE consumer: connects to the already-built multi-cast produce fragment for
// this cteId via a new ExchangeNode, projects the CTE's output-column mapping, applies
// any consumer-side predicate/limit, and registers the new consume fragment.
@Override
public PlanFragment visitPhysicalCTEConsume(OptExpression optExpression, ExecPlan context) {
PhysicalCTEConsumeOperator consume = (PhysicalCTEConsumeOperator) optExpression.getOp();
int cteId = consume.getCteId();
// The produce fragment for this CTE was registered by visitPhysicalCTEProduce.
MultiCastPlanFragment cteFragment = (MultiCastPlanFragment) context.getCteProduceFragments().get(cteId);
ExchangeNode exchangeNode = new ExchangeNode(context.getNextNodeId(),
cteFragment.getPlanRoot(), DistributionSpec.DistributionType.SHUFFLE);
exchangeNode.setReceiveColumns(consume.getCteOutputColumnRefMap().values().stream()
.map(ColumnRefOperator::getId).collect(Collectors.toList()));
exchangeNode.setDataPartition(cteFragment.getDataPartition());
exchangeNode.setNumInstances(cteFragment.getPlanRoot().getNumInstances());
PlanFragment consumeFragment = new PlanFragment(context.getNextFragmentId(), exchangeNode,
cteFragment.getDataPartition());
// Map the CTE's internal columns to this consumer's column refs with a projection.
Map<ColumnRefOperator, ScalarOperator> projectMap = Maps.newHashMap();
projectMap.putAll(consume.getCteOutputColumnRefMap());
consumeFragment = buildProjectNode(optExpression, new Projection(projectMap), consumeFragment, context);
consumeFragment.setQueryGlobalDicts(cteFragment.getQueryGlobalDicts());
consumeFragment.setLoadGlobalDicts(cteFragment.getLoadGlobalDicts());
// Consumer-side predicate becomes a SelectNode above the projection.
if (consume.getPredicate() != null) {
List<Expr> predicates = Utils.extractConjuncts(consume.getPredicate()).stream()
.map(d -> ScalarOperatorToExpr.buildExecExpression(d,
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
.collect(Collectors.toList());
SelectNode selectNode =
new SelectNode(context.getNextNodeId(), consumeFragment.getPlanRoot(), predicates);
selectNode.computeStatistics(optExpression.getStatistics());
consumeFragment.setPlanRoot(selectNode);
}
if (consume.hasLimit()) {
consumeFragment.getPlanRoot().setLimit(consume.getLimit());
}
// Register this consumer as one destination of the multi-cast produce fragment.
cteFragment.getDestNodeList().add(exchangeNode);
consumeFragment.addChild(cteFragment);
context.getFragments().add(consumeFragment);
return consumeFragment;
}
// Plans a CTE producer: wraps the child's fragment in a MultiCastPlanFragment (so
// multiple consumers can read it), records it by cteId for later consumers, and
// returns the original child fragment.
@Override
public PlanFragment visitPhysicalCTEProduce(OptExpression optExpression, ExecPlan context) {
PlanFragment child = visit(optExpression.inputAt(0), context);
int cteId = ((PhysicalCTEProduceOperator) optExpression.getOp()).getCteId();
// The child fragment is replaced in the fragment list by the multi-cast wrapper.
context.getFragments().remove(child);
MultiCastPlanFragment cteProduce = new MultiCastPlanFragment(child);
List<Expr> outputs = Lists.newArrayList();
optExpression.getOutputColumns().getStream()
.forEach(i -> outputs.add(context.getColRefToExpr().get(columnRefFactory.getColumnRef(i))));
cteProduce.setOutputExprs(outputs);
context.getCteProduceFragments().put(cteId, cteProduce);
context.getFragments().add(cteProduce);
return child;
}
// A CTE anchor plans its produce side (input 0) for its side effects on the context,
// then returns the plan of the query side (input 1).
@Override
public PlanFragment visitPhysicalCTEAnchor(OptExpression optExpression, ExecPlan context) {
visit(optExpression.inputAt(0), context);
return visit(optExpression.inputAt(1), context);
}
// A no-CTE marker is transparent: plan its only child.
@Override
public PlanFragment visitPhysicalNoCTE(OptExpression optExpression, ExecPlan context) {
return visit(optExpression.inputAt(0), context);
}
// Immutable holder for the translated join expressions produced by buildJoinExpr.
static class JoinExprInfo {
// Equality conjuncts from the on-predicate (the hash-join keys).
public final List<Expr> eqJoinConjuncts;
// Remaining (non-equality) conjuncts from the on-predicate.
public final List<Expr> otherJoin;
// Conjuncts from the operator's own predicate, applied after the join.
public final List<Expr> conjuncts;
// Columns required to evaluate otherJoin and conjuncts.
public final ColumnRefSet requiredColsForFilter;
public JoinExprInfo(List<Expr> eqJoinConjuncts, List<Expr> otherJoin, List<Expr> conjuncts,
ColumnRefSet requiredColsForFilter) {
this.eqJoinConjuncts = eqJoinConjuncts;
this.otherJoin = otherJoin;
this.conjuncts = conjuncts;
this.requiredColsForFilter = requiredColsForFilter;
}
}
// Translates the join-related scalar operators of a join OptExpression into executable Exprs.
//
// The on-predicate is split into:
//   - eqJoinConjuncts: equality conjuncts spanning both children (hash-join keys),
//     normalized so each equality's left operand uses only left-child columns;
//   - otherJoin: the remaining on-predicate conjuncts ("other join conjuncts").
// The operator's own predicate becomes post-join "conjuncts". Also collects the set
// of columns needed to evaluate the non-equality filters so their slots stay available.
//
// Throws IllegalStateException for an operator that is neither a PhysicalJoinOperator
// nor a PhysicalStreamJoinOperator, and an unsupported-exception if an equality
// conjunct folds to a constant.
private JoinExprInfo buildJoinExpr(OptExpression optExpr, ExecPlan context) {
    ScalarOperator predicate = optExpr.getOp().getPredicate();
    ScalarOperator onPredicate;
    if (optExpr.getOp() instanceof PhysicalJoinOperator) {
        onPredicate = ((PhysicalJoinOperator) optExpr.getOp()).getOnPredicate();
    } else if (optExpr.getOp() instanceof PhysicalStreamJoinOperator) {
        onPredicate = ((PhysicalStreamJoinOperator) optExpr.getOp()).getOnPredicate();
    } else {
        throw new IllegalStateException("not supported join " + optExpr.getOp());
    }

    List<ScalarOperator> onPredicates = Utils.extractConjuncts(onPredicate);
    ColumnRefSet leftChildColumns = optExpr.inputAt(0).getOutputColumns();
    ColumnRefSet rightChildColumns = optExpr.inputAt(1).getOutputColumns();
    List<BinaryPredicateOperator> eqOnPredicates = JoinHelper.getEqualsPredicate(
            leftChildColumns, rightChildColumns, onPredicates);
    Preconditions.checkState(!eqOnPredicates.isEmpty(), "must be eq-join");

    // Normalize: left operand of every equality must only use left-child columns.
    for (BinaryPredicateOperator s : eqOnPredicates) {
        if (!optExpr.inputAt(0).getLogicalProperty().getOutputColumns()
                .containsAll(s.getChild(0).getUsedColumns())) {
            s.swap();
        }
    }

    List<Expr> eqJoinConjuncts =
            eqOnPredicates.stream().map(e -> ScalarOperatorToExpr.buildExecExpression(e,
                            new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
                    .collect(Collectors.toList());

    for (Expr expr : eqJoinConjuncts) {
        if (expr.isConstant()) {
            throw unsupportedException("Support join on constant predicate later");
        }
    }

    // Everything in the on-predicate that is not an equality is an "other join" conjunct.
    List<ScalarOperator> otherJoin = Utils.extractConjuncts(onPredicate);
    otherJoin.removeAll(eqOnPredicates);
    List<Expr> otherJoinConjuncts = otherJoin.stream().map(e -> ScalarOperatorToExpr.buildExecExpression(e,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
            .collect(Collectors.toList());

    // The operator's own predicate is applied after the join.
    List<ScalarOperator> predicates = Utils.extractConjuncts(predicate);
    List<Expr> conjuncts = predicates.stream().map(e -> ScalarOperatorToExpr.buildExecExpression(e,
                    new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr())))
            .collect(Collectors.toList());

    // Columns referenced by the non-equality filters (idiomatic Iterable.forEach
    // instead of the redundant stream().forEach()).
    ColumnRefSet requiredColsForFilter = new ColumnRefSet();
    otherJoin.forEach(e -> requiredColsForFilter.union(e.getUsedColumns()));
    predicates.forEach(e -> requiredColsForFilter.union(e.getUsedColumns()));
    return new JoinExprInfo(eqJoinConjuncts, otherJoinConjuncts, conjuncts, requiredColsForFilter);
}
// Plans a streaming (incremental MV) join. Only inner joins are supported; the
// distribution mode is fixed to SHUFFLE_HASH_BUCKET. Reuses buildJoinExpr for the
// conjunct translation and buildJoinFragment for fragment assembly.
@Override
public PlanFragment visitPhysicalStreamJoin(OptExpression optExpr, ExecPlan context) {
PhysicalStreamJoinOperator node = (PhysicalStreamJoinOperator) optExpr.getOp();
PlanFragment leftFragment = visit(optExpr.inputAt(0), context);
PlanFragment rightFragment = visit(optExpr.inputAt(1), context);
ColumnRefSet leftChildColumns = optExpr.inputAt(0).getLogicalProperty().getOutputColumns();
ColumnRefSet rightChildColumns = optExpr.inputAt(1).getLogicalProperty().getOutputColumns();
if (!node.getJoinType().isInnerJoin()) {
throw new NotImplementedException("Only inner join is supported");
}
JoinOperator joinOperator = node.getJoinType();
PlanNode leftFragmentPlanRoot = leftFragment.getPlanRoot();
PlanNode rightFragmentPlanRoot = rightFragment.getPlanRoot();
JoinNode.DistributionMode distributionMode = JoinNode.DistributionMode.SHUFFLE_HASH_BUCKET;
JoinExprInfo joinExpr = buildJoinExpr(optExpr, context);
List<Expr> eqJoinConjuncts = joinExpr.eqJoinConjuncts;
List<Expr> otherJoinConjuncts = joinExpr.otherJoin;
List<Expr> conjuncts = joinExpr.conjuncts;
// Mark slots of the null-producing side(s) nullable for outer joins.
// NOTE(review): with only inner joins accepted above, these branches currently
// never fire; kept for when more join types are supported.
List<PlanFragment> nullablePlanFragments = new ArrayList<>();
if (joinOperator.isLeftOuterJoin()) {
nullablePlanFragments.add(rightFragment);
} else if (joinOperator.isRightOuterJoin()) {
nullablePlanFragments.add(leftFragment);
} else if (joinOperator.isFullOuterJoin()) {
nullablePlanFragments.add(leftFragment);
nullablePlanFragments.add(rightFragment);
}
for (PlanFragment planFragment : nullablePlanFragments) {
for (TupleId tupleId : planFragment.getPlanRoot().getTupleIds()) {
context.getDescTbl().getTupleDesc(tupleId).getSlots().forEach(slot -> slot.setIsNullable(true));
}
}
JoinNode joinNode =
new StreamJoinNode(context.getNextNodeId(), leftFragmentPlanRoot, rightFragmentPlanRoot,
node.getJoinType(), eqJoinConjuncts, otherJoinConjuncts);
fillSlotsInfo(node.getProjection(), joinNode, optExpr, joinExpr.requiredColsForFilter);
joinNode.setDistributionMode(distributionMode);
joinNode.getConjuncts().addAll(conjuncts);
joinNode.setLimit(node.getLimit());
joinNode.computeStatistics(optExpr.getStatistics());
return buildJoinFragment(context, leftFragment, rightFragment, distributionMode, joinNode);
}
// Assembles the final plan fragment(s) for a join node according to its distribution
// mode. Depending on the mode, one side's fragment is merged into the other
// (BROADCAST, COLOCATE/REPLICATED, local SHUFFLE_HASH_BUCKET), a brand-new fragment
// is created (PARTITIONED), or the bucket-shuffle helpers are delegated to.
// In every merge case the surviving fragment inherits the other side's children and
// query global dicts, and the fragment-list order is refreshed via remove+add.
@NotNull
private PlanFragment buildJoinFragment(ExecPlan context, PlanFragment leftFragment, PlanFragment rightFragment,
JoinNode.DistributionMode distributionMode, JoinNode joinNode) {
if (distributionMode.equals(JoinNode.DistributionMode.BROADCAST)) {
// Broadcast: the right side is replicated to the left fragment; the join runs
// entirely inside the left fragment.
setJoinPushDown(joinNode);
rightFragment.getPlanRoot().setFragment(leftFragment);
context.getFragments().remove(rightFragment);
context.getFragments().remove(leftFragment);
context.getFragments().add(leftFragment);
leftFragment.setPlanRoot(joinNode);
leftFragment.addChildren(rightFragment.getChildren());
leftFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
return leftFragment;
} else if (distributionMode.equals(JoinNode.DistributionMode.PARTITIONED)) {
// Shuffle join: both children re-hash their output and a new fragment hosts the join.
DataPartition lhsJoinPartition = new DataPartition(TPartitionType.HASH_PARTITIONED,
leftFragment.getDataPartition().getPartitionExprs());
DataPartition rhsJoinPartition = new DataPartition(TPartitionType.HASH_PARTITIONED,
rightFragment.getDataPartition().getPartitionExprs());
leftFragment.getChild(0).setOutputPartition(lhsJoinPartition);
rightFragment.getChild(0).setOutputPartition(rhsJoinPartition);
context.getFragments().remove(leftFragment);
context.getFragments().remove(rightFragment);
PlanFragment joinFragment = new PlanFragment(context.getNextFragmentId(),
joinNode, lhsJoinPartition);
joinFragment.addChildren(leftFragment.getChildren());
joinFragment.addChildren(rightFragment.getChildren());
joinFragment.mergeQueryGlobalDicts(leftFragment.getQueryGlobalDicts());
joinFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
context.getFragments().add(joinFragment);
return joinFragment;
} else if (distributionMode.equals(JoinNode.DistributionMode.COLOCATE) ||
distributionMode.equals(JoinNode.DistributionMode.REPLICATED)) {
// Colocate/replicated: no exchange needed; the join joins the two plan roots
// directly inside the left fragment.
if (distributionMode.equals(JoinNode.DistributionMode.COLOCATE)) {
joinNode.setColocate(true, "");
} else {
joinNode.setReplicated(true);
}
setJoinPushDown(joinNode);
joinNode.setChild(0, leftFragment.getPlanRoot());
joinNode.setChild(1, rightFragment.getPlanRoot());
leftFragment.setPlanRoot(joinNode);
leftFragment.addChildren(rightFragment.getChildren());
context.getFragments().remove(rightFragment);
context.getFragments().remove(leftFragment);
context.getFragments().add(leftFragment);
leftFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
return leftFragment;
} else if (distributionMode.equals(JoinNode.DistributionMode.SHUFFLE_HASH_BUCKET)) {
setJoinPushDown(joinNode);
// Neither side is an exchange: join in place inside the left fragment.
if (!(leftFragment.getPlanRoot() instanceof ExchangeNode) &&
!(rightFragment.getPlanRoot() instanceof ExchangeNode)) {
joinNode.setChild(0, leftFragment.getPlanRoot());
joinNode.setChild(1, rightFragment.getPlanRoot());
leftFragment.setPlanRoot(joinNode);
leftFragment.addChildren(rightFragment.getChildren());
context.getFragments().remove(rightFragment);
context.getFragments().remove(leftFragment);
context.getFragments().add(leftFragment);
leftFragment.mergeQueryGlobalDicts(rightFragment.getQueryGlobalDicts());
return leftFragment;
} else if (leftFragment.getPlanRoot() instanceof ExchangeNode &&
!(rightFragment.getPlanRoot() instanceof ExchangeNode)) {
// The non-exchange side "stays"; the exchange side is re-partitioned into it.
return computeShuffleHashBucketPlanFragment(context, rightFragment,
leftFragment, joinNode);
} else {
return computeShuffleHashBucketPlanFragment(context, leftFragment,
rightFragment, joinNode);
}
} else {
// LOCAL_HASH_BUCKET (bucket shuffle): the non-exchange side stays bucketed locally.
setJoinPushDown(joinNode);
if (leftFragment.getPlanRoot() instanceof ExchangeNode &&
!(rightFragment.getPlanRoot() instanceof ExchangeNode)) {
leftFragment = computeBucketShufflePlanFragment(context, rightFragment,
leftFragment, joinNode);
} else {
leftFragment = computeBucketShufflePlanFragment(context, leftFragment,
rightFragment, joinNode);
}
return leftFragment;
}
}
// Infers the join's physical distribution mode from the shape of its children:
//   - both children shuffle exchanges            -> PARTITIONED
//   - right child a broadcast exchange           -> BROADCAST
//   - neither child an exchange                  -> COLOCATE / REPLICATED / SHUFFLE_HASH_BUCKET
//   - otherwise (exactly one exchange)           -> SHUFFLE_HASH_BUCKET or LOCAL_HASH_BUCKET
@NotNull
private JoinNode.DistributionMode inferDistributionMode(OptExpression optExpr, PlanNode leftFragmentPlanRoot,
                                                        PlanNode rightFragmentPlanRoot) {
    if (isExchangeWithDistributionType(leftFragmentPlanRoot, DistributionSpec.DistributionType.SHUFFLE) &&
            isExchangeWithDistributionType(rightFragmentPlanRoot, DistributionSpec.DistributionType.SHUFFLE)) {
        return JoinNode.DistributionMode.PARTITIONED;
    }
    if (isExchangeWithDistributionType(rightFragmentPlanRoot, DistributionSpec.DistributionType.BROADCAST)) {
        return JoinNode.DistributionMode.BROADCAST;
    }
    if (!(leftFragmentPlanRoot instanceof ExchangeNode) && !(rightFragmentPlanRoot instanceof ExchangeNode)) {
        if (isColocateJoin(optExpr)) {
            return JoinNode.DistributionMode.COLOCATE;
        }
        if (ConnectContext.get().getSessionVariable().isEnableReplicationJoin() &&
                rightFragmentPlanRoot.canDoReplicatedJoin()) {
            return JoinNode.DistributionMode.REPLICATED;
        }
        if (isShuffleJoin(optExpr)) {
            return JoinNode.DistributionMode.SHUFFLE_HASH_BUCKET;
        }
        // Unreachable by construction; checkState(false, ...) always throws here.
        Preconditions.checkState(false, "Must be colocate/bucket/replicate join");
        return JoinNode.DistributionMode.COLOCATE;
    }
    if (isShuffleJoin(optExpr)) {
        return JoinNode.DistributionMode.SHUFFLE_HASH_BUCKET;
    }
    return JoinNode.DistributionMode.LOCAL_HASH_BUCKET;
}
// Plans a streaming (incremental MV) aggregation: builds the output tuple via
// buildAggregateTuple, creates single-phase (FIRST) aggregate info, and stacks a
// StreamAggNode on the child fragment.
@Override
public PlanFragment visitPhysicalStreamAgg(OptExpression optExpr, ExecPlan context) {
PhysicalStreamAggOperator node = (PhysicalStreamAggOperator) optExpr.getOp();
PlanFragment inputFragment = visit(optExpr.inputAt(0), context);
TupleDescriptor outputTupleDesc = context.getDescTbl().createTupleDescriptor();
AggregateExprInfo aggExpr =
buildAggregateTuple(node.getAggregations(), node.getGroupBys(), null, outputTupleDesc, context);
// Intermediate and output tuples are the same descriptor for the streaming agg.
AggregateInfo aggInfo =
AggregateInfo.create(aggExpr.groupExpr, aggExpr.aggregateExpr, outputTupleDesc, outputTupleDesc,
AggregateInfo.AggPhase.FIRST);
StreamAggNode aggNode = new StreamAggNode(context.getNextNodeId(), inputFragment.getPlanRoot(), aggInfo);
aggNode.setHasNullableGenerateChild();
aggNode.computeStatistics(optExpr.getStatistics());
inputFragment.setPlanRoot(aggNode);
return inputFragment;
}
// Plans a binlog (stream) scan over an OLAP table: creates the tuple/slot descriptors,
// computes the scan ranges, attaches predicate conjuncts, and emits a new
// RANDOM-partitioned fragment rooted at the BinlogScanNode.
@Override
public PlanFragment visitPhysicalStreamScan(OptExpression optExpr, ExecPlan context) {
PhysicalStreamScanOperator node = (PhysicalStreamScanOperator) optExpr.getOp();
OlapTable scanTable = (OlapTable) node.getTable();
context.getDescTbl().addReferencedTable(scanTable);
TupleDescriptor tupleDescriptor = context.getDescTbl().createTupleDescriptor();
tupleDescriptor.setTable(scanTable);
BinlogScanNode binlogScanNode = new BinlogScanNode(context.getNextNodeId(), tupleDescriptor);
binlogScanNode.computeStatistics(optExpr.getStatistics());
// Scan-range computation can fail (UserException); surface it as a planner error.
try {
binlogScanNode.computeScanRanges();
} catch (UserException e) {
throw new StarRocksPlannerException(
"Failed to compute scan ranges for StreamScanNode, " + e.getMessage(), INTERNAL_ERROR);
}
// One slot per referenced column; slot id mirrors the column ref id.
for (Map.Entry<ColumnRefOperator, Column> entry : node.getColRefToColumnMetaMap().entrySet()) {
SlotDescriptor slotDescriptor =
context.getDescTbl().addSlotDescriptor(tupleDescriptor, new SlotId(entry.getKey().getId()));
slotDescriptor.setColumn(entry.getValue());
slotDescriptor.setIsNullable(entry.getValue().isAllowNull());
slotDescriptor.setIsMaterialized(true);
context.getColRefToExpr().put(entry.getKey(), new SlotRef(entry.getKey().toString(), slotDescriptor));
}
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator predicate : predicates) {
binlogScanNode.getConjuncts()
.add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
tupleDescriptor.computeMemLayout();
context.getScanNodes().add(binlogScanNode);
PlanFragment fragment = new PlanFragment(context.getNextFragmentId(), binlogScanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
// Computes the join node's output slot ids from its projection: all columns used by
// the projection (minus common-sub-operator keys) plus the columns needed by the
// non-pushed-down filters. If that set would be empty, the first output column of
// the right child is kept so the join still produces at least one slot.
// No-op when the join has no projection.
private void fillSlotsInfo(Projection projection, JoinNode joinNode, OptExpression optExpr,
                           ColumnRefSet requiredColsForFilter) {
    if (projection == null) {
        return;
    }
    ColumnRefSet outputCols = new ColumnRefSet();
    for (ScalarOperator expr : projection.getColumnRefMap().values()) {
        outputCols.union(expr.getUsedColumns());
    }
    for (ScalarOperator expr : projection.getCommonSubOperatorMap().values()) {
        outputCols.union(expr.getUsedColumns());
    }
    // Common-sub-operator keys are internal; they are not real output columns.
    outputCols.except(new ArrayList<>(projection.getCommonSubOperatorMap().keySet()));
    outputCols.union(requiredColsForFilter);
    if (outputCols.isEmpty()) {
        outputCols.union(optExpr.inputAt(1).getOutputColumns().getFirstId());
    }
    joinNode.setOutputSlots(outputCols.getStream().collect(Collectors.toList()));
}
@Override
public PlanFragment visitPhysicalTableFunctionTableScan(OptExpression optExpression, ExecPlan context) {
PhysicalTableFunctionTableScanOperator node =
(PhysicalTableFunctionTableScanOperator) optExpression.getOp();
TableFunctionTable table = (TableFunctionTable) node.getTable();
TupleDescriptor tupleDesc = buildTupleDesc(context, table);
List<List<TBrokerFileStatus>> files = new ArrayList<>();
files.add(table.fileList());
FileScanNode scanNode = new FileScanNode(context.getNextNodeId(), tupleDesc,
"FileScanNode", files, table.fileList().size());
List<BrokerFileGroup> fileGroups = new ArrayList<>();
try {
BrokerFileGroup grp = new BrokerFileGroup(table);
fileGroups.add(grp);
} catch (UserException e) {
throw new StarRocksPlannerException(
"Build Exec FileScanNode fail, scan info is invalid," + e.getMessage(),
INTERNAL_ERROR);
}
prepareContextSlots(node, context, tupleDesc);
scanNode.setLoadInfo(-1, -1, table, new BrokerDesc(table.getProperties()), fileGroups, false, 1);
scanNode.setUseVectorizedLoad(true);
Analyzer analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), context.getConnectContext());
try {
scanNode.init(analyzer);
scanNode.finalizeStats(analyzer);
} catch (UserException e) {
throw new StarRocksPlannerException(
"Build Exec FileScanNode fail, scan info is invalid," + e.getMessage(),
INTERNAL_ERROR);
}
List<ScalarOperator> predicates = Utils.extractConjuncts(node.getPredicate());
ScalarOperatorToExpr.FormatterContext formatterContext =
new ScalarOperatorToExpr.FormatterContext(context.getColRefToExpr());
for (ScalarOperator predicate : predicates) {
scanNode.getConjuncts().add(ScalarOperatorToExpr.buildExecExpression(predicate, formatterContext));
}
scanNode.setLimit(node.getLimit());
scanNode.computeStatistics(optExpression.getStatistics());
context.getScanNodes().add(scanNode);
PlanFragment fragment =
new PlanFragment(context.getNextFragmentId(), scanNode, DataPartition.RANDOM);
context.getFragments().add(fragment);
return fragment;
}
}
} |
This will disable it for CD? | private boolean enabled(ZoneId zone) {
return system.isPublic() || Flags.ENABLE_ONPREM_TENANT_S3_ARCHIVE.bindTo(flagSource)
.with(FetchVector.Dimension.ZONE_ID, zone.value())
.value();
} | return system.isPublic() || Flags.ENABLE_ONPREM_TENANT_S3_ARCHIVE.bindTo(flagSource) | private boolean enabled(ZoneId zone) {
return system.isPublic() || Flags.ENABLE_ONPREM_TENANT_S3_ARCHIVE.bindTo(flagSource)
.with(FetchVector.Dimension.ZONE_ID, zone.value())
.value();
} | class CuratorArchiveBucketDb {
/**
* Due to policy limits, we can't put data for more than this many tenants in a bucket.
* Policy size limit is 20kb, about 550 bytes for non-tenant related policies. Each tenant
* needs about 500 + len(role_arn) bytes, we limit role_arn to 100 characters, so we can
* fit about (20k - 550) / 600 ~ 32 tenants per bucket.
*/
private final static int TENANTS_PER_BUCKET = 30;
/**
* Archive URIs are often requested because they are returned in /application/v4 API. Since they
* never change, it's safe to cache them and only update on misses
*/
private final Map<ZoneId, Map<TenantName, String>> archiveUriCache = new ConcurrentHashMap<>();
private final ArchiveService archiveService;
private final CuratorDb curatorDb;
private final FlagSource flagSource;
private final SystemName system;
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.flagSource = controller.flagSource();
this.system = controller.zoneRegistry().system();
}
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
if (enabled(zoneId)) {
return Optional.of(URI.create(Text.format("s3:
} else {
return Optional.empty();
}
}
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
return getBucketNameFromCache(zoneId, tenant)
.or(() -> findAndUpdateArchiveUriCache(zoneId, tenant, buckets(zoneId)))
.orElseGet(() -> assignToBucket(zoneId, tenant));
}
private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(buckets(zoneId));
return findAndUpdateArchiveUriCache(zoneId, tenant, zoneBuckets)
.orElseGet(() -> {
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
zoneBuckets.remove(unfilled);
zoneBuckets.add(unfilled.withTenant(tenant));
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return unfilled.bucketName();
}
var newBucket = archiveService.createArchiveBucketFor(zoneId).withTenant(tenant);
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
updateArchiveUriCache(zoneId, zoneBuckets);
return newBucket.bucketName();
});
}
}
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
private Optional<String> findAndUpdateArchiveUriCache(ZoneId zoneId, TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
Optional<String> bucketName = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().contains(tenant))
.findAny()
.map(ArchiveBucket::bucketName);
if (bucketName.isPresent()) updateArchiveUriCache(zoneId, zoneBuckets);
return bucketName;
}
private Optional<String> getBucketNameFromCache(ZoneId zoneId, TenantName tenantName) {
return Optional.ofNullable(archiveUriCache.get(zoneId)).map(map -> map.get(tenantName));
}
private void updateArchiveUriCache(ZoneId zoneId, Set<ArchiveBucket> zoneBuckets) {
Map<TenantName, String> bucketNameByTenant = zoneBuckets.stream()
.flatMap(bucket -> bucket.tenants().stream()
.map(tenant -> Map.entry(tenant, bucket.bucketName())))
.collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
archiveUriCache.put(zoneId, bucketNameByTenant);
}
} | class CuratorArchiveBucketDb {
/**
* Due to policy limits, we can't put data for more than this many tenants in a bucket.
* Policy size limit is 20kb, about 550 bytes for non-tenant related policies. Each tenant
* needs about 500 + len(role_arn) bytes, we limit role_arn to 100 characters, so we can
* fit about (20k - 550) / 600 ~ 32 tenants per bucket.
*/
private final static int TENANTS_PER_BUCKET = 30;
/**
* Archive URIs are often requested because they are returned in /application/v4 API. Since they
* never change, it's safe to cache them and only update on misses
*/
private final Map<ZoneId, Map<TenantName, String>> archiveUriCache = new ConcurrentHashMap<>();
private final ArchiveService archiveService;
private final CuratorDb curatorDb;
private final FlagSource flagSource;
private final SystemName system;
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.flagSource = controller.flagSource();
this.system = controller.zoneRegistry().system();
}
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
if (enabled(zoneId)) {
return Optional.of(URI.create(Text.format("s3:
} else {
return Optional.empty();
}
}
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
return getBucketNameFromCache(zoneId, tenant)
.or(() -> findAndUpdateArchiveUriCache(zoneId, tenant, buckets(zoneId)))
.orElseGet(() -> assignToBucket(zoneId, tenant));
}
private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(buckets(zoneId));
return findAndUpdateArchiveUriCache(zoneId, tenant, zoneBuckets)
.orElseGet(() -> {
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
zoneBuckets.remove(unfilled);
zoneBuckets.add(unfilled.withTenant(tenant));
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return unfilled.bucketName();
}
var newBucket = archiveService.createArchiveBucketFor(zoneId).withTenant(tenant);
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
updateArchiveUriCache(zoneId, zoneBuckets);
return newBucket.bucketName();
});
}
}
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
private Optional<String> findAndUpdateArchiveUriCache(ZoneId zoneId, TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
Optional<String> bucketName = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().contains(tenant))
.findAny()
.map(ArchiveBucket::bucketName);
if (bucketName.isPresent()) updateArchiveUriCache(zoneId, zoneBuckets);
return bucketName;
}
private Optional<String> getBucketNameFromCache(ZoneId zoneId, TenantName tenantName) {
return Optional.ofNullable(archiveUriCache.get(zoneId)).map(map -> map.get(tenantName));
}
private void updateArchiveUriCache(ZoneId zoneId, Set<ArchiveBucket> zoneBuckets) {
Map<TenantName, String> bucketNameByTenant = zoneBuckets.stream()
.flatMap(bucket -> bucket.tenants().stream()
.map(tenant -> Map.entry(tenant, bucket.bucketName())))
.collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
archiveUriCache.put(zoneId, bucketNameByTenant);
}
} |
It's already effectively disabled as the archive maintainer is disabled through feature flag in cd. | private boolean enabled(ZoneId zone) {
return system.isPublic() || Flags.ENABLE_ONPREM_TENANT_S3_ARCHIVE.bindTo(flagSource)
.with(FetchVector.Dimension.ZONE_ID, zone.value())
.value();
} | return system.isPublic() || Flags.ENABLE_ONPREM_TENANT_S3_ARCHIVE.bindTo(flagSource) | private boolean enabled(ZoneId zone) {
return system.isPublic() || Flags.ENABLE_ONPREM_TENANT_S3_ARCHIVE.bindTo(flagSource)
.with(FetchVector.Dimension.ZONE_ID, zone.value())
.value();
} | class CuratorArchiveBucketDb {
/**
* Due to policy limits, we can't put data for more than this many tenants in a bucket.
* Policy size limit is 20kb, about 550 bytes for non-tenant related policies. Each tenant
* needs about 500 + len(role_arn) bytes, we limit role_arn to 100 characters, so we can
* fit about (20k - 550) / 600 ~ 32 tenants per bucket.
*/
private final static int TENANTS_PER_BUCKET = 30;
/**
* Archive URIs are often requested because they are returned in /application/v4 API. Since they
* never change, it's safe to cache them and only update on misses
*/
private final Map<ZoneId, Map<TenantName, String>> archiveUriCache = new ConcurrentHashMap<>();
private final ArchiveService archiveService;
private final CuratorDb curatorDb;
private final FlagSource flagSource;
private final SystemName system;
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.flagSource = controller.flagSource();
this.system = controller.zoneRegistry().system();
}
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
if (enabled(zoneId)) {
return Optional.of(URI.create(Text.format("s3:
} else {
return Optional.empty();
}
}
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
return getBucketNameFromCache(zoneId, tenant)
.or(() -> findAndUpdateArchiveUriCache(zoneId, tenant, buckets(zoneId)))
.orElseGet(() -> assignToBucket(zoneId, tenant));
}
private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(buckets(zoneId));
return findAndUpdateArchiveUriCache(zoneId, tenant, zoneBuckets)
.orElseGet(() -> {
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
zoneBuckets.remove(unfilled);
zoneBuckets.add(unfilled.withTenant(tenant));
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return unfilled.bucketName();
}
var newBucket = archiveService.createArchiveBucketFor(zoneId).withTenant(tenant);
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
updateArchiveUriCache(zoneId, zoneBuckets);
return newBucket.bucketName();
});
}
}
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
private Optional<String> findAndUpdateArchiveUriCache(ZoneId zoneId, TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
Optional<String> bucketName = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().contains(tenant))
.findAny()
.map(ArchiveBucket::bucketName);
if (bucketName.isPresent()) updateArchiveUriCache(zoneId, zoneBuckets);
return bucketName;
}
private Optional<String> getBucketNameFromCache(ZoneId zoneId, TenantName tenantName) {
return Optional.ofNullable(archiveUriCache.get(zoneId)).map(map -> map.get(tenantName));
}
private void updateArchiveUriCache(ZoneId zoneId, Set<ArchiveBucket> zoneBuckets) {
Map<TenantName, String> bucketNameByTenant = zoneBuckets.stream()
.flatMap(bucket -> bucket.tenants().stream()
.map(tenant -> Map.entry(tenant, bucket.bucketName())))
.collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
archiveUriCache.put(zoneId, bucketNameByTenant);
}
} | class CuratorArchiveBucketDb {
/**
* Due to policy limits, we can't put data for more than this many tenants in a bucket.
* Policy size limit is 20kb, about 550 bytes for non-tenant related policies. Each tenant
* needs about 500 + len(role_arn) bytes, we limit role_arn to 100 characters, so we can
* fit about (20k - 550) / 600 ~ 32 tenants per bucket.
*/
private final static int TENANTS_PER_BUCKET = 30;
/**
* Archive URIs are often requested because they are returned in /application/v4 API. Since they
* never change, it's safe to cache them and only update on misses
*/
private final Map<ZoneId, Map<TenantName, String>> archiveUriCache = new ConcurrentHashMap<>();
private final ArchiveService archiveService;
private final CuratorDb curatorDb;
private final FlagSource flagSource;
private final SystemName system;
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
this.flagSource = controller.flagSource();
this.system = controller.zoneRegistry().system();
}
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
if (enabled(zoneId)) {
return Optional.of(URI.create(Text.format("s3:
} else {
return Optional.empty();
}
}
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
return getBucketNameFromCache(zoneId, tenant)
.or(() -> findAndUpdateArchiveUriCache(zoneId, tenant, buckets(zoneId)))
.orElseGet(() -> assignToBucket(zoneId, tenant));
}
private String assignToBucket(ZoneId zoneId, TenantName tenant) {
try (var lock = curatorDb.lockArchiveBuckets(zoneId)) {
Set<ArchiveBucket> zoneBuckets = new HashSet<>(buckets(zoneId));
return findAndUpdateArchiveUriCache(zoneId, tenant, zoneBuckets)
.orElseGet(() -> {
Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET)
.findAny();
if (unfilledBucket.isPresent()) {
var unfilled = unfilledBucket.get();
zoneBuckets.remove(unfilled);
zoneBuckets.add(unfilled.withTenant(tenant));
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
return unfilled.bucketName();
}
var newBucket = archiveService.createArchiveBucketFor(zoneId).withTenant(tenant);
zoneBuckets.add(newBucket);
curatorDb.writeArchiveBuckets(zoneId, zoneBuckets);
updateArchiveUriCache(zoneId, zoneBuckets);
return newBucket.bucketName();
});
}
}
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
private Optional<String> findAndUpdateArchiveUriCache(ZoneId zoneId, TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
Optional<String> bucketName = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().contains(tenant))
.findAny()
.map(ArchiveBucket::bucketName);
if (bucketName.isPresent()) updateArchiveUriCache(zoneId, zoneBuckets);
return bucketName;
}
private Optional<String> getBucketNameFromCache(ZoneId zoneId, TenantName tenantName) {
return Optional.ofNullable(archiveUriCache.get(zoneId)).map(map -> map.get(tenantName));
}
private void updateArchiveUriCache(ZoneId zoneId, Set<ArchiveBucket> zoneBuckets) {
Map<TenantName, String> bucketNameByTenant = zoneBuckets.stream()
.flatMap(bucket -> bucket.tenants().stream()
.map(tenant -> Map.entry(tenant, bucket.bucketName())))
.collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
archiveUriCache.put(zoneId, bucketNameByTenant);
}
} |
Note that you could do `mock(Sleeper.class)` here, and let Sleeper be a class with the Sleeper.DEFAULT implementation. This would avoid the interface, 2 constants, and 2 implementations (as top-level declarations in the Sleeper.java file, which I have not encountered elsewhere in our code). | void generates_jvm_dump_from_request() throws IOException {
ContainerOperations operations = mock(ContainerOperations.class);
when(operations.executeCommandInContainerAsRoot(any(), any()))
.thenAnswer(invocation -> {
Files.createFile(tmpDirectory.resolve("vespa-service-dump/" + JvmDumpProducer.NAME + "/heap.bin"));
Files.createFile(tmpDirectory.resolve("vespa-service-dump/" + JvmDumpProducer.NAME + "/jstack"));
return new CommandResult(null, 0, "result");
});
SyncClient syncClient = createSyncClientMock();
NodeRepoMock nodeRepository = new NodeRepoMock();
ManualClock clock = new ManualClock(Instant.ofEpochMilli(1600001000000L));
NodeSpec initialSpec = createNodeSpecWithDumpRequest(nodeRepository, JvmDumpProducer.NAME, null);
VespaServiceDumper reporter = new VespaServiceDumperImpl(operations, syncClient, nodeRepository, clock, Sleeper.NOOP);
NodeAgentContextImpl context = new NodeAgentContextImpl.Builder(initialSpec)
.fileSystem(fileSystem)
.build();
reporter.processServiceDumpRequest(context);
String expectedJson =
"{\"createdMillis\":1600000000000,\"startedAt\":1600001000000,\"completedAt\":1600001000000," +
"\"location\":\"s3:
"\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"]}";
assertReportEquals(nodeRepository, expectedJson);
verify(operations).executeCommandInContainerAsRoot(
context, "/opt/vespa/bin/vespa-jvm-dumper", "default/container.1", "/opt/vespa/tmp/vespa-service-dump/jvm-dump");
List<URI> expectedUris = List.of(
URI.create("s3:
URI.create("s3:
assertSyncedFiles(context, syncClient, expectedUris);
} | VespaServiceDumper reporter = new VespaServiceDumperImpl(operations, syncClient, nodeRepository, clock, Sleeper.NOOP); | void generates_jvm_dump_from_request() throws IOException {
ContainerOperations operations = mock(ContainerOperations.class);
when(operations.executeCommandInContainerAsRoot(any(), any()))
.thenAnswer(invocation -> {
Files.createFile(tmpDirectory.resolve("vespa-service-dump/" + JvmDumpProducer.NAME + "/heap.bin"));
Files.createFile(tmpDirectory.resolve("vespa-service-dump/" + JvmDumpProducer.NAME + "/jstack"));
return new CommandResult(null, 0, "result");
});
SyncClient syncClient = createSyncClientMock();
NodeRepoMock nodeRepository = new NodeRepoMock();
ManualClock clock = new ManualClock(Instant.ofEpochMilli(1600001000000L));
NodeSpec initialSpec = createNodeSpecWithDumpRequest(nodeRepository, JvmDumpProducer.NAME, null);
VespaServiceDumper reporter = new VespaServiceDumperImpl(operations, syncClient, nodeRepository, clock, Sleeper.NOOP);
NodeAgentContextImpl context = new NodeAgentContextImpl.Builder(initialSpec)
.fileSystem(fileSystem)
.build();
reporter.processServiceDumpRequest(context);
String expectedJson =
"{\"createdMillis\":1600000000000,\"startedAt\":1600001000000,\"completedAt\":1600001000000," +
"\"location\":\"s3:
"\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"]}";
assertReportEquals(nodeRepository, expectedJson);
verify(operations).executeCommandInContainerAsRoot(
context, "/opt/vespa/bin/vespa-jvm-dumper", "default/container.1", "/opt/vespa/tmp/vespa-service-dump/jvm-dump");
List<URI> expectedUris = List.of(
URI.create("s3:
URI.create("s3:
assertSyncedFiles(context, syncClient, expectedUris);
} | class VespaServiceDumperImplTest {
private static final String HOSTNAME = "host-1.domain.tld";
private final FileSystem fileSystem = TestFileSystem.create();;
private final Path tmpDirectory = fileSystem.getPath("/home/docker/container-storage/host-1/opt/vespa/tmp");
@BeforeEach
void create_tmp_directory() throws IOException {
Files.createDirectories(tmpDirectory);
}
@Test
void creates_valid_dump_id_from_dump_request() {
long nowMillis = Instant.now().toEpochMilli();
ServiceDumpReport request = new ServiceDumpReport(
nowMillis, null, null, null, null, "default/container.3", null, null, List.of(JvmDumpProducer.NAME), null);
String dumpId = VespaServiceDumperImpl.createDumpId(request);
assertEquals("default-container-3-" + nowMillis, dumpId);
}
@Test
@Test
void invokes_perf_commands_when_generating_perf_report() {
ContainerOperations operations = mock(ContainerOperations.class);
when(operations.executeCommandInContainerAsRoot(any(), any()))
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, ""))
.thenReturn(new CommandResult(null, 0, ""));
SyncClient syncClient = createSyncClientMock();
NodeRepoMock nodeRepository = new NodeRepoMock();
ManualClock clock = new ManualClock(Instant.ofEpochMilli(1600001000000L));
NodeSpec nodeSpec = createNodeSpecWithDumpRequest(nodeRepository, PerfReportProducer.NAME, new ServiceDumpReport.DumpOptions(true, 45.0));
VespaServiceDumper reporter = new VespaServiceDumperImpl(operations, syncClient, nodeRepository, clock, Sleeper.NOOP);
NodeAgentContextImpl context = new NodeAgentContextImpl.Builder(nodeSpec)
.fileSystem(fileSystem)
.build();
reporter.processServiceDumpRequest(context);
verify(operations).executeCommandInContainerAsRoot(
context, "/opt/vespa/libexec/vespa/find-pid", "default/container.1");
verify(operations).executeCommandInContainerAsRoot(
context, "perf", "record", "-g", "--output=/opt/vespa/tmp/vespa-service-dump/perf-report/perf-record.bin",
"--pid=12345", "sleep", "45");
verify(operations).executeCommandInContainerAsRoot(
context, "bash", "-c", "perf report --input=/opt/vespa/tmp/vespa-service-dump/perf-report/perf-record.bin" +
" > /opt/vespa/tmp/vespa-service-dump/perf-report/perf-report.txt");
}
@Test
void invokes_jcmd_commands_when_creating_jfr_recording() {
ContainerOperations operations = mock(ContainerOperations.class);
when(operations.executeCommandInContainerAsRoot(any(), any()))
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, "ok"))
.thenReturn(new CommandResult(null, 0, "name=host-admin success"));
SyncClient syncClient = createSyncClientMock();
NodeRepoMock nodeRepository = new NodeRepoMock();
ManualClock clock = new ManualClock(Instant.ofEpochMilli(1600001000000L));
NodeSpec nodeSpec = createNodeSpecWithDumpRequest(
nodeRepository, JavaFlightRecorder.NAME, new ServiceDumpReport.DumpOptions(null, null));
VespaServiceDumper reporter = new VespaServiceDumperImpl(operations, syncClient, nodeRepository, clock, Sleeper.NOOP);
NodeAgentContextImpl context = new NodeAgentContextImpl.Builder(nodeSpec)
.fileSystem(fileSystem)
.build();
reporter.processServiceDumpRequest(context);
verify(operations).executeCommandInContainerAsRoot(
context, "/opt/vespa/libexec/vespa/find-pid", "default/container.1");
verify(operations).executeCommandInContainerAsRoot(
context, "jcmd", "12345", "JFR.start", "name=host-admin", "path-to-gc-roots=true", "settings=profile",
"filename=/opt/vespa/tmp/vespa-service-dump/jfr-recording/recording.jfr", "duration=30s");
verify(operations).executeCommandInContainerAsRoot(context, "jcmd", "12345", "JFR.check", "name=host-admin");
}
private static NodeSpec createNodeSpecWithDumpRequest(NodeRepoMock repository, String artifactName, ServiceDumpReport.DumpOptions options) {
ServiceDumpReport request = ServiceDumpReport.createRequestReport(
Instant.ofEpochMilli(1600000000000L), null, "default/container.1", List.of(artifactName), options);
NodeSpec spec = NodeSpec.Builder
.testSpec(HOSTNAME, NodeState.active)
.report(ServiceDumpReport.REPORT_ID, request.toJsonNode())
.archiveUri(URI.create("s3:
.build();
repository.updateNodeSpec(spec);
return spec;
}
private static void assertReportEquals(NodeRepoMock nodeRepository, String expectedJson) {
ServiceDumpReport report = nodeRepository.getNode(HOSTNAME).reports()
.getReport(ServiceDumpReport.REPORT_ID, ServiceDumpReport.class).get();
String actualJson = report.toJson();
assertEquals(expectedJson, actualJson);
}
@SuppressWarnings("unchecked")
private static void assertSyncedFiles(NodeAgentContextImpl context, SyncClient client, List<URI> expectedDestinations) {
ArgumentCaptor<List<SyncFileInfo>> filesCaptor = ArgumentCaptor.forClass(List.class);
verify(client).sync(eq(context), filesCaptor.capture(), eq(Integer.MAX_VALUE));
List<SyncFileInfo> actualFiles = filesCaptor.getValue();
List<URI> actualFilenames = actualFiles.stream()
.map(SyncFileInfo::destination)
.sorted()
.collect(Collectors.toList());
assertEquals(expectedDestinations, actualFilenames);
}
private SyncClient createSyncClientMock() {
SyncClient client = mock(SyncClient.class);
when(client.sync(any(), any(), anyInt()))
.thenReturn(true);
return client;
}
} | class VespaServiceDumperImplTest {
private static final String HOSTNAME = "host-1.domain.tld";
private final FileSystem fileSystem = TestFileSystem.create();;
private final Path tmpDirectory = fileSystem.getPath("/home/docker/container-storage/host-1/opt/vespa/tmp");
@BeforeEach
void create_tmp_directory() throws IOException {
Files.createDirectories(tmpDirectory);
}
@Test
void creates_valid_dump_id_from_dump_request() {
long nowMillis = Instant.now().toEpochMilli();
ServiceDumpReport request = new ServiceDumpReport(
nowMillis, null, null, null, null, "default/container.3", null, null, List.of(JvmDumpProducer.NAME), null);
String dumpId = VespaServiceDumperImpl.createDumpId(request);
assertEquals("default-container-3-" + nowMillis, dumpId);
}
@Test
@Test
void invokes_perf_commands_when_generating_perf_report() {
ContainerOperations operations = mock(ContainerOperations.class);
when(operations.executeCommandInContainerAsRoot(any(), any()))
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, ""))
.thenReturn(new CommandResult(null, 0, ""));
SyncClient syncClient = createSyncClientMock();
NodeRepoMock nodeRepository = new NodeRepoMock();
ManualClock clock = new ManualClock(Instant.ofEpochMilli(1600001000000L));
NodeSpec nodeSpec = createNodeSpecWithDumpRequest(nodeRepository, PerfReportProducer.NAME, new ServiceDumpReport.DumpOptions(true, 45.0));
VespaServiceDumper reporter = new VespaServiceDumperImpl(operations, syncClient, nodeRepository, clock, Sleeper.NOOP);
NodeAgentContextImpl context = new NodeAgentContextImpl.Builder(nodeSpec)
.fileSystem(fileSystem)
.build();
reporter.processServiceDumpRequest(context);
verify(operations).executeCommandInContainerAsRoot(
context, "/opt/vespa/libexec/vespa/find-pid", "default/container.1");
verify(operations).executeCommandInContainerAsRoot(
context, "perf", "record", "-g", "--output=/opt/vespa/tmp/vespa-service-dump/perf-report/perf-record.bin",
"--pid=12345", "sleep", "45");
verify(operations).executeCommandInContainerAsRoot(
context, "bash", "-c", "perf report --input=/opt/vespa/tmp/vespa-service-dump/perf-report/perf-record.bin" +
" > /opt/vespa/tmp/vespa-service-dump/perf-report/perf-report.txt");
}
@Test
void invokes_jcmd_commands_when_creating_jfr_recording() {
ContainerOperations operations = mock(ContainerOperations.class);
when(operations.executeCommandInContainerAsRoot(any(), any()))
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, "ok"))
.thenReturn(new CommandResult(null, 0, "name=host-admin success"));
SyncClient syncClient = createSyncClientMock();
NodeRepoMock nodeRepository = new NodeRepoMock();
ManualClock clock = new ManualClock(Instant.ofEpochMilli(1600001000000L));
NodeSpec nodeSpec = createNodeSpecWithDumpRequest(
nodeRepository, JavaFlightRecorder.NAME, new ServiceDumpReport.DumpOptions(null, null));
VespaServiceDumper reporter = new VespaServiceDumperImpl(operations, syncClient, nodeRepository, clock, Sleeper.NOOP);
NodeAgentContextImpl context = new NodeAgentContextImpl.Builder(nodeSpec)
.fileSystem(fileSystem)
.build();
reporter.processServiceDumpRequest(context);
verify(operations).executeCommandInContainerAsRoot(
context, "/opt/vespa/libexec/vespa/find-pid", "default/container.1");
verify(operations).executeCommandInContainerAsRoot(
context, "jcmd", "12345", "JFR.start", "name=host-admin", "path-to-gc-roots=true", "settings=profile",
"filename=/opt/vespa/tmp/vespa-service-dump/jfr-recording/recording.jfr", "duration=30s");
verify(operations).executeCommandInContainerAsRoot(context, "jcmd", "12345", "JFR.check", "name=host-admin");
}
private static NodeSpec createNodeSpecWithDumpRequest(NodeRepoMock repository, String artifactName, ServiceDumpReport.DumpOptions options) {
ServiceDumpReport request = ServiceDumpReport.createRequestReport(
Instant.ofEpochMilli(1600000000000L), null, "default/container.1", List.of(artifactName), options);
NodeSpec spec = NodeSpec.Builder
.testSpec(HOSTNAME, NodeState.active)
.report(ServiceDumpReport.REPORT_ID, request.toJsonNode())
.archiveUri(URI.create("s3:
.build();
repository.updateNodeSpec(spec);
return spec;
}
private static void assertReportEquals(NodeRepoMock nodeRepository, String expectedJson) {
ServiceDumpReport report = nodeRepository.getNode(HOSTNAME).reports()
.getReport(ServiceDumpReport.REPORT_ID, ServiceDumpReport.class).get();
String actualJson = report.toJson();
assertEquals(expectedJson, actualJson);
}
@SuppressWarnings("unchecked")
private static void assertSyncedFiles(NodeAgentContextImpl context, SyncClient client, List<URI> expectedDestinations) {
ArgumentCaptor<List<SyncFileInfo>> filesCaptor = ArgumentCaptor.forClass(List.class);
verify(client).sync(eq(context), filesCaptor.capture(), eq(Integer.MAX_VALUE));
List<SyncFileInfo> actualFiles = filesCaptor.getValue();
List<URI> actualFilenames = actualFiles.stream()
.map(SyncFileInfo::destination)
.sorted()
.collect(Collectors.toList());
assertEquals(expectedDestinations, actualFilenames);
}
private SyncClient createSyncClientMock() {
SyncClient client = mock(SyncClient.class);
when(client.sync(any(), any(), anyInt()))
.thenReturn(true);
return client;
}
} |
So the tenant owner will not be able to re-create a tenant once it is deleted, without operators first clearing the tombstone? I guess that's OK. | public void delete(TenantName tenant, Supplier<Credentials> credentials, boolean forget) {
try (Lock lock = lock(tenant)) {
Tenant oldTenant = get(tenant, true)
.orElseThrow(() -> new NotExistsException("Could not delete tenant '" + tenant + "': Tenant not found"));
if (oldTenant.type() != Tenant.Type.deleted) {
if (!controller.applications().asList(tenant).isEmpty())
throw new IllegalArgumentException("Could not delete tenant '" + tenant.value()
+ "': This tenant has active applications");
accessControl.deleteTenant(tenant, credentials.get());
controller.notificationsDb().removeNotifications(NotificationSource.from(tenant));
}
if (forget) curator.removeTenant(tenant);
else curator.writeTenant(new DeletedTenant(tenant, oldTenant.createdAt(), controller.clock().instant()));
}
} | accessControl.deleteTenant(tenant, credentials.get()); | public void delete(TenantName tenant, Supplier<Credentials> credentials, boolean forget) {
try (Lock lock = lock(tenant)) {
Tenant oldTenant = get(tenant, true)
.orElseThrow(() -> new NotExistsException("Could not delete tenant '" + tenant + "': Tenant not found"));
if (oldTenant.type() != Tenant.Type.deleted) {
if (!controller.applications().asList(tenant).isEmpty())
throw new IllegalArgumentException("Could not delete tenant '" + tenant.value()
+ "': This tenant has active applications");
accessControl.deleteTenant(tenant, credentials.get());
controller.notificationsDb().removeNotifications(NotificationSource.from(tenant));
}
if (forget) curator.removeTenant(tenant);
else curator.writeTenant(new DeletedTenant(tenant, oldTenant.createdAt(), controller.clock().instant()));
}
} | class TenantController {
private static final Logger log = Logger.getLogger(TenantController.class.getName());
private final Controller controller;
private final CuratorDb curator;
private final AccessControl accessControl;
public TenantController(Controller controller, CuratorDb curator, AccessControl accessControl, FlagSource flagSource) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.curator = Objects.requireNonNull(curator, "curator must be non-null");
this.accessControl = accessControl;
Once.after(Duration.ofMinutes(1), () -> {
Instant start = controller.clock().instant();
int count = 0;
for (TenantName name : curator.readTenantNames()) {
lockIfPresent(name, LockedTenant.class, this::store);
count++;
}
log.log(Level.INFO, Text.format("Wrote %d tenants in %s", count,
Duration.between(start, controller.clock().instant())));
});
}
/** Returns a list of all known, non-deleted tenants sorted by name */
public List<Tenant> asList() {
return asList(false);
}
/** Returns a list of all known tenants sorted by name */
public List<Tenant> asList(boolean includeDeleted) {
return curator.readTenants().stream()
.filter(tenant -> tenant.type() != Tenant.Type.deleted || includeDeleted)
.sorted(Comparator.comparing(Tenant::name))
.collect(Collectors.toList());
}
/** Locks a tenant for modification and applies the given action. */
public <T extends LockedTenant> void lockIfPresent(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
get(name).map(tenant -> LockedTenant.of(tenant, lock))
.map(token::cast)
.ifPresent(action);
}
}
/** Lock a tenant for modification and apply action. Throws if the tenant does not exist */
public <T extends LockedTenant> void lockOrThrow(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
action.accept(token.cast(LockedTenant.of(require(name), lock)));
}
}
/** Returns the tenant with the given name, or throws. */
public Tenant require(TenantName name) {
return get(name).orElseThrow(() -> new IllegalArgumentException("No such tenant '" + name + "'."));
}
/** Returns the tenant with the given name, and ensures the type */
public <T extends Tenant> T require(TenantName name, Class<T> tenantType) {
return get(name)
.map(t -> {
try { return tenantType.cast(t); } catch (ClassCastException e) {
throw new IllegalArgumentException("Tenant '" + name + "' was of type '" + t.getClass().getSimpleName() + "' and not '" + tenantType.getSimpleName() + "'");
}
})
.orElseThrow(() -> new IllegalArgumentException("No such tenant '" + name + "'."));
}
/** Replace and store any previous version of given tenant */
public void store(LockedTenant tenant) {
curator.writeTenant(tenant.get());
}
/** Create a tenant, provided the given credentials are valid. */
public void create(TenantSpec tenantSpec, Credentials credentials) {
try (Lock lock = lock(tenantSpec.tenant())) {
TenantId.validate(tenantSpec.tenant().value());
requireNonExistent(tenantSpec.tenant());
curator.writeTenant(accessControl.createTenant(tenantSpec, controller.clock().instant(), credentials, asList()));
}
}
/** Find tenant by name */
public Optional<Tenant> get(TenantName name) {
return get(name, false);
}
public Optional<Tenant> get(TenantName name, boolean includeDeleted) {
return curator.readTenant(name)
.filter(tenant -> tenant.type() != Tenant.Type.deleted || includeDeleted);
}
/** Find tenant by name */
public Optional<Tenant> get(String name) {
return get(TenantName.from(name));
}
/** Updates the tenant contained in the given tenant spec with new data. */
public void update(TenantSpec tenantSpec, Credentials credentials) {
try (Lock lock = lock(tenantSpec.tenant())) {
curator.writeTenant(accessControl.updateTenant(tenantSpec, credentials, asList(),
controller.applications().asList(tenantSpec.tenant())));
}
}
/**
* Update last login times for the given tenant at the given user levers with the given instant, but only if the
* new instant is later
*/
public void updateLastLogin(TenantName tenantName, List<LastLoginInfo.UserLevel> userLevels, Instant loggedInAt) {
try (Lock lock = lock(tenantName)) {
Tenant tenant = require(tenantName);
LastLoginInfo loginInfo = tenant.lastLoginInfo();
for (LastLoginInfo.UserLevel userLevel : userLevels)
loginInfo = loginInfo.withLastLoginIfLater(userLevel, loggedInAt);
if (tenant.lastLoginInfo().equals(loginInfo)) return;
curator.writeTenant(LockedTenant.of(tenant, lock).with(loginInfo).get());
}
}
/** Deletes the given tenant. */
private void requireNonExistent(TenantName name) {
if (SystemApplication.TENANT.equals(name)
|| get(name, true).isPresent()
|| get(name.value().replace('-', '_')).isPresent()) {
throw new IllegalArgumentException("Tenant '" + name + "' already exists");
}
}
/**
* Returns a lock which provides exclusive rights to changing this tenant.
* Any operation which stores a tenant need to first acquire this lock, then read, modify
* and store the tenant, and finally release (close) the lock.
*/
private Lock lock(TenantName tenant) {
return curator.lock(tenant);
}
} | class TenantController {
private static final Logger log = Logger.getLogger(TenantController.class.getName());
private final Controller controller;
private final CuratorDb curator;
private final AccessControl accessControl;
public TenantController(Controller controller, CuratorDb curator, AccessControl accessControl, FlagSource flagSource) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.curator = Objects.requireNonNull(curator, "curator must be non-null");
this.accessControl = accessControl;
Once.after(Duration.ofMinutes(1), () -> {
Instant start = controller.clock().instant();
int count = 0;
for (TenantName name : curator.readTenantNames()) {
lockIfPresent(name, LockedTenant.class, this::store);
count++;
}
log.log(Level.INFO, Text.format("Wrote %d tenants in %s", count,
Duration.between(start, controller.clock().instant())));
});
}
/** Returns a list of all known, non-deleted tenants sorted by name */
public List<Tenant> asList() {
return asList(false);
}
/** Returns a list of all known tenants sorted by name */
public List<Tenant> asList(boolean includeDeleted) {
return curator.readTenants().stream()
.filter(tenant -> tenant.type() != Tenant.Type.deleted || includeDeleted)
.sorted(Comparator.comparing(Tenant::name))
.collect(Collectors.toList());
}
/** Locks a tenant for modification and applies the given action. */
public <T extends LockedTenant> void lockIfPresent(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
get(name).map(tenant -> LockedTenant.of(tenant, lock))
.map(token::cast)
.ifPresent(action);
}
}
/** Lock a tenant for modification and apply action. Throws if the tenant does not exist */
public <T extends LockedTenant> void lockOrThrow(TenantName name, Class<T> token, Consumer<T> action) {
try (Lock lock = lock(name)) {
action.accept(token.cast(LockedTenant.of(require(name), lock)));
}
}
/** Returns the tenant with the given name, or throws. */
public Tenant require(TenantName name) {
return get(name).orElseThrow(() -> new IllegalArgumentException("No such tenant '" + name + "'."));
}
/** Returns the tenant with the given name, and ensures the type */
public <T extends Tenant> T require(TenantName name, Class<T> tenantType) {
return get(name)
.map(t -> {
try { return tenantType.cast(t); } catch (ClassCastException e) {
throw new IllegalArgumentException("Tenant '" + name + "' was of type '" + t.getClass().getSimpleName() + "' and not '" + tenantType.getSimpleName() + "'");
}
})
.orElseThrow(() -> new IllegalArgumentException("No such tenant '" + name + "'."));
}
/** Replace and store any previous version of given tenant */
public void store(LockedTenant tenant) {
curator.writeTenant(tenant.get());
}
/** Create a tenant, provided the given credentials are valid. */
public void create(TenantSpec tenantSpec, Credentials credentials) {
try (Lock lock = lock(tenantSpec.tenant())) {
TenantId.validate(tenantSpec.tenant().value());
requireNonExistent(tenantSpec.tenant());
curator.writeTenant(accessControl.createTenant(tenantSpec, controller.clock().instant(), credentials, asList()));
}
}
/** Find tenant by name */
public Optional<Tenant> get(TenantName name) {
return get(name, false);
}
public Optional<Tenant> get(TenantName name, boolean includeDeleted) {
return curator.readTenant(name)
.filter(tenant -> tenant.type() != Tenant.Type.deleted || includeDeleted);
}
/** Find tenant by name */
public Optional<Tenant> get(String name) {
return get(TenantName.from(name));
}
/** Updates the tenant contained in the given tenant spec with new data. */
public void update(TenantSpec tenantSpec, Credentials credentials) {
try (Lock lock = lock(tenantSpec.tenant())) {
curator.writeTenant(accessControl.updateTenant(tenantSpec, credentials, asList(),
controller.applications().asList(tenantSpec.tenant())));
}
}
/**
* Update last login times for the given tenant at the given user levers with the given instant, but only if the
* new instant is later
*/
public void updateLastLogin(TenantName tenantName, List<LastLoginInfo.UserLevel> userLevels, Instant loggedInAt) {
try (Lock lock = lock(tenantName)) {
Tenant tenant = require(tenantName);
LastLoginInfo loginInfo = tenant.lastLoginInfo();
for (LastLoginInfo.UserLevel userLevel : userLevels)
loginInfo = loginInfo.withLastLoginIfLater(userLevel, loggedInAt);
if (tenant.lastLoginInfo().equals(loginInfo)) return;
curator.writeTenant(LockedTenant.of(tenant, lock).with(loginInfo).get());
}
}
/** Deletes the given tenant. */
private void requireNonExistent(TenantName name) {
if (SystemApplication.TENANT.equals(name)
|| get(name, true).isPresent()
|| get(name.value().replace('-', '_')).isPresent()) {
throw new IllegalArgumentException("Tenant '" + name + "' already exists");
}
}
/**
* Returns a lock which provides exclusive rights to changing this tenant.
* Any operation which stores a tenant need to first acquire this lock, then read, modify
* and store the tenant, and finally release (close) the lock.
*/
private Lock lock(TenantName tenant) {
return curator.lock(tenant);
}
} |
Just don't delete any tenants before all controllers have upgraded! | private static String valueOf(Tenant.Type type) {
switch (type) {
case athenz: return "athenz";
case cloud: return "cloud";
case deleted: return "deleted";
default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
}
} | case deleted: return "deleted"; | private static String valueOf(Tenant.Type type) {
switch (type) {
case athenz: return "athenz";
case cloud: return "cloud";
case deleted: return "deleted";
default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
}
} | class TenantSerializer {
private static final String nameField = "name";
private static final String typeField = "type";
private static final String athenzDomainField = "athenzDomain";
private static final String propertyField = "property";
private static final String propertyIdField = "propertyId";
private static final String creatorField = "creator";
private static final String createdAtField = "createdAt";
private static final String deletedAtField = "deletedAt";
private static final String contactField = "contact";
private static final String contactUrlField = "contactUrl";
private static final String propertyUrlField = "propertyUrl";
private static final String issueTrackerUrlField = "issueTrackerUrl";
private static final String personsField = "persons";
private static final String personField = "person";
private static final String queueField = "queue";
private static final String componentField = "component";
private static final String billingInfoField = "billingInfo";
private static final String customerIdField = "customerId";
private static final String productCodeField = "productCode";
private static final String pemDeveloperKeysField = "pemDeveloperKeys";
private static final String tenantInfoField = "info";
private static final String lastLoginInfoField = "lastLoginInfo";
private static final String secretStoresField = "secretStores";
private static final String archiveAccessRoleField = "archiveAccessRole";
private static final String awsIdField = "awsId";
private static final String roleField = "role";
public Slime toSlime(Tenant tenant) {
Slime slime = new Slime();
Cursor tenantObject = slime.setObject();
tenantObject.setString(nameField, tenant.name().value());
tenantObject.setString(typeField, valueOf(tenant.type()));
tenantObject.setLong(createdAtField, tenant.createdAt().toEpochMilli());
toSlime(tenant.lastLoginInfo(), tenantObject.setObject(lastLoginInfoField));
switch (tenant.type()) {
case athenz: toSlime((AthenzTenant) tenant, tenantObject); break;
case cloud: toSlime((CloudTenant) tenant, tenantObject); break;
case deleted: toSlime((DeletedTenant) tenant, tenantObject); break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
return slime;
}
private void toSlime(AthenzTenant tenant, Cursor tenantObject) {
tenantObject.setString(athenzDomainField, tenant.domain().getName());
tenantObject.setString(propertyField, tenant.property().id());
tenant.propertyId().ifPresent(propertyId -> tenantObject.setString(propertyIdField, propertyId.id()));
tenant.contact().ifPresent(contact -> {
Cursor contactCursor = tenantObject.setObject(contactField);
writeContact(contact, contactCursor);
});
}
private void toSlime(CloudTenant tenant, Cursor root) {
var legacyBillingInfo = new BillingInfo("customer", "Vespa");
tenant.creator().ifPresent(creator -> root.setString(creatorField, creator.getName()));
developerKeysToSlime(tenant.developerKeys(), root.setArray(pemDeveloperKeysField));
toSlime(legacyBillingInfo, root.setObject(billingInfoField));
toSlime(tenant.info(), root);
toSlime(tenant.tenantSecretStores(), root);
tenant.archiveAccessRole().ifPresent(role -> root.setString(archiveAccessRoleField, role));
}
private void toSlime(DeletedTenant tenant, Cursor root) {
root.setLong(deletedAtField, tenant.deletedAt().toEpochMilli());
}
private void developerKeysToSlime(BiMap<PublicKey, Principal> keys, Cursor array) {
keys.forEach((key, user) -> {
Cursor object = array.addObject();
object.setString("key", KeyUtils.toPem(key));
object.setString("user", user.getName());
});
}
private void toSlime(BillingInfo billingInfo, Cursor billingInfoObject) {
billingInfoObject.setString(customerIdField, billingInfo.customerId());
billingInfoObject.setString(productCodeField, billingInfo.productCode());
}
private void toSlime(LastLoginInfo lastLoginInfo, Cursor lastLoginInfoObject) {
for (LastLoginInfo.UserLevel userLevel: LastLoginInfo.UserLevel.values()) {
lastLoginInfo.get(userLevel).ifPresent(lastLoginAt ->
lastLoginInfoObject.setLong(valueOf(userLevel), lastLoginAt.toEpochMilli()));
}
}
public Tenant tenantFrom(Slime slime) {
Inspector tenantObject = slime.get();
Tenant.Type type = typeOf(tenantObject.field(typeField).asString());
switch (type) {
case athenz: return athenzTenantFrom(tenantObject);
case cloud: return cloudTenantFrom(tenantObject);
case deleted: return deletedTenantFrom(tenantObject);
default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
}
}
private AthenzTenant athenzTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
AthenzDomain domain = new AthenzDomain(tenantObject.field(athenzDomainField).asString());
Property property = new Property(tenantObject.field(propertyField).asString());
Optional<PropertyId> propertyId = SlimeUtils.optionalString(tenantObject.field(propertyIdField)).map(PropertyId::new);
Optional<Contact> contact = contactFrom(tenantObject.field(contactField));
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
return new AthenzTenant(name, domain, property, propertyId, contact, createdAt, lastLoginInfo);
}
private CloudTenant cloudTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
Optional<Principal> creator = SlimeUtils.optionalString(tenantObject.field(creatorField)).map(SimplePrincipal::new);
BiMap<PublicKey, Principal> developerKeys = developerKeysFromSlime(tenantObject.field(pemDeveloperKeysField));
TenantInfo info = tenantInfoFromSlime(tenantObject.field(tenantInfoField));
List<TenantSecretStore> tenantSecretStores = secretStoresFromSlime(tenantObject.field(secretStoresField));
Optional<String> archiveAccessRole = SlimeUtils.optionalString(tenantObject.field(archiveAccessRoleField));
return new CloudTenant(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccessRole);
}
private DeletedTenant deletedTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
Instant deletedAt = SlimeUtils.instant(tenantObject.field(deletedAtField));
return new DeletedTenant(name, createdAt, deletedAt);
}
private BiMap<PublicKey, Principal> developerKeysFromSlime(Inspector array) {
ImmutableBiMap.Builder<PublicKey, Principal> keys = ImmutableBiMap.builder();
array.traverse((ArrayTraverser) (__, keyObject) ->
keys.put(KeyUtils.fromPemEncodedPublicKey(keyObject.field("key").asString()),
new SimplePrincipal(keyObject.field("user").asString())));
return keys.build();
}
TenantInfo tenantInfoFromSlime(Inspector infoObject) {
if (!infoObject.valid()) return TenantInfo.EMPTY;
return TenantInfo.EMPTY
.withName(infoObject.field("name").asString())
.withEmail(infoObject.field("email").asString())
.withWebsite(infoObject.field("website").asString())
.withContactName(infoObject.field("contactName").asString())
.withContactEmail(infoObject.field("contactEmail").asString())
.withInvoiceEmail(infoObject.field("invoiceEmail").asString())
.withAddress(tenantInfoAddressFromSlime(infoObject.field("address")))
.withBillingContact(tenantInfoBillingContactFromSlime(infoObject.field("billingContact")));
}
private TenantInfoAddress tenantInfoAddressFromSlime(Inspector addressObject) {
return TenantInfoAddress.EMPTY
.withAddressLines(addressObject.field("addressLines").asString())
.withPostalCodeOrZip(addressObject.field("postalCodeOrZip").asString())
.withCity(addressObject.field("city").asString())
.withStateRegionProvince(addressObject.field("stateRegionProvince").asString())
.withCountry(addressObject.field("country").asString());
}
private TenantInfoBillingContact tenantInfoBillingContactFromSlime(Inspector billingObject) {
return TenantInfoBillingContact.EMPTY
.withName(billingObject.field("name").asString())
.withEmail(billingObject.field("email").asString())
.withPhone(billingObject.field("phone").asString())
.withAddress(tenantInfoAddressFromSlime(billingObject.field("address")));
}
private List<TenantSecretStore> secretStoresFromSlime(Inspector secretStoresObject) {
List<TenantSecretStore> secretStores = new ArrayList<>();
if (!secretStoresObject.valid()) return secretStores;
secretStoresObject.traverse((ArrayTraverser) (index, inspector) -> {
secretStores.add(
new TenantSecretStore(
inspector.field(nameField).asString(),
inspector.field(awsIdField).asString(),
inspector.field(roleField).asString()
)
);
});
return secretStores;
}
private LastLoginInfo lastLoginInfoFromSlime(Inspector lastLoginInfoObject) {
Map<LastLoginInfo.UserLevel, Instant> lastLoginByUserLevel = new HashMap<>();
lastLoginInfoObject.traverse((String name, Inspector value) ->
lastLoginByUserLevel.put(userLevelOf(name), SlimeUtils.instant(value)));
return new LastLoginInfo(lastLoginByUserLevel);
}
void toSlime(TenantInfo info, Cursor parentCursor) {
if (info.isEmpty()) return;
Cursor infoCursor = parentCursor.setObject("info");
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("invoiceEmail", info.invoiceEmail());
infoCursor.setString("contactName", info.contactName());
infoCursor.setString("contactEmail", info.contactEmail());
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
}
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.addressLines());
addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
addressCursor.setString("country", address.country());
}
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.name());
addressCursor.setString("email", billingContact.email());
addressCursor.setString("phone", billingContact.phone());
toSlime(billingContact.address(), addressCursor);
}
private void toSlime(List<TenantSecretStore> tenantSecretStores, Cursor parentCursor) {
if (tenantSecretStores.isEmpty()) return;
Cursor secretStoresCursor = parentCursor.setArray(secretStoresField);
tenantSecretStores.forEach(tenantSecretStore -> {
Cursor secretStoreCursor = secretStoresCursor.addObject();
secretStoreCursor.setString(nameField, tenantSecretStore.getName());
secretStoreCursor.setString(awsIdField, tenantSecretStore.getAwsId());
secretStoreCursor.setString(roleField, tenantSecretStore.getRole());
});
}
private Optional<Contact> contactFrom(Inspector object) {
if ( ! object.valid()) return Optional.empty();
URI contactUrl = URI.create(object.field(contactUrlField).asString());
URI propertyUrl = URI.create(object.field(propertyUrlField).asString());
URI issueTrackerUrl = URI.create(object.field(issueTrackerUrlField).asString());
List<List<String>> persons = personsFrom(object.field(personsField));
String queue = object.field(queueField).asString();
Optional<String> component = object.field(componentField).valid() ? Optional.of(object.field(componentField).asString()) : Optional.empty();
return Optional.of(new Contact(contactUrl,
propertyUrl,
issueTrackerUrl,
persons,
queue,
component));
}
private void writeContact(Contact contact, Cursor contactCursor) {
contactCursor.setString(contactUrlField, contact.url().toString());
contactCursor.setString(propertyUrlField, contact.propertyUrl().toString());
contactCursor.setString(issueTrackerUrlField, contact.issueTrackerUrl().toString());
Cursor personsArray = contactCursor.setArray(personsField);
contact.persons().forEach(personList -> {
Cursor personArray = personsArray.addArray();
personList.forEach(person -> {
Cursor personObject = personArray.addObject();
personObject.setString(personField, person);
});
});
contactCursor.setString(queueField, contact.queue());
contact.component().ifPresent(component -> contactCursor.setString(componentField, component));
}
private List<List<String>> personsFrom(Inspector array) {
List<List<String>> personLists = new ArrayList<>();
array.traverse((ArrayTraverser) (i, personArray) -> {
List<String> persons = new ArrayList<>();
personArray.traverse((ArrayTraverser) (j, inspector) -> persons.add(inspector.field("person").asString()));
personLists.add(persons);
});
return personLists;
}
private static Tenant.Type typeOf(String value) {
switch (value) {
case "athenz": return Tenant.Type.athenz;
case "cloud": return Tenant.Type.cloud;
case "deleted": return Tenant.Type.deleted;
default: throw new IllegalArgumentException("Unknown tenant type '" + value + "'.");
}
}
private static LastLoginInfo.UserLevel userLevelOf(String value) {
switch (value) {
case "user": return LastLoginInfo.UserLevel.user;
case "developer": return LastLoginInfo.UserLevel.developer;
case "administrator": return LastLoginInfo.UserLevel.administrator;
default: throw new IllegalArgumentException("Unknown user level '" + value + "'.");
}
}
private static String valueOf(LastLoginInfo.UserLevel userLevel) {
switch (userLevel) {
case user: return "user";
case developer: return "developer";
case administrator: return "administrator";
default: throw new IllegalArgumentException("Unexpected user level '" + userLevel + "'.");
}
}
} | class TenantSerializer {
private static final String nameField = "name";
private static final String typeField = "type";
private static final String athenzDomainField = "athenzDomain";
private static final String propertyField = "property";
private static final String propertyIdField = "propertyId";
private static final String creatorField = "creator";
private static final String createdAtField = "createdAt";
private static final String deletedAtField = "deletedAt";
private static final String contactField = "contact";
private static final String contactUrlField = "contactUrl";
private static final String propertyUrlField = "propertyUrl";
private static final String issueTrackerUrlField = "issueTrackerUrl";
private static final String personsField = "persons";
private static final String personField = "person";
private static final String queueField = "queue";
private static final String componentField = "component";
private static final String billingInfoField = "billingInfo";
private static final String customerIdField = "customerId";
private static final String productCodeField = "productCode";
private static final String pemDeveloperKeysField = "pemDeveloperKeys";
private static final String tenantInfoField = "info";
private static final String lastLoginInfoField = "lastLoginInfo";
private static final String secretStoresField = "secretStores";
private static final String archiveAccessRoleField = "archiveAccessRole";
private static final String awsIdField = "awsId";
private static final String roleField = "role";
public Slime toSlime(Tenant tenant) {
Slime slime = new Slime();
Cursor tenantObject = slime.setObject();
tenantObject.setString(nameField, tenant.name().value());
tenantObject.setString(typeField, valueOf(tenant.type()));
tenantObject.setLong(createdAtField, tenant.createdAt().toEpochMilli());
toSlime(tenant.lastLoginInfo(), tenantObject.setObject(lastLoginInfoField));
switch (tenant.type()) {
case athenz: toSlime((AthenzTenant) tenant, tenantObject); break;
case cloud: toSlime((CloudTenant) tenant, tenantObject); break;
case deleted: toSlime((DeletedTenant) tenant, tenantObject); break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
return slime;
}
private void toSlime(AthenzTenant tenant, Cursor tenantObject) {
tenantObject.setString(athenzDomainField, tenant.domain().getName());
tenantObject.setString(propertyField, tenant.property().id());
tenant.propertyId().ifPresent(propertyId -> tenantObject.setString(propertyIdField, propertyId.id()));
tenant.contact().ifPresent(contact -> {
Cursor contactCursor = tenantObject.setObject(contactField);
writeContact(contact, contactCursor);
});
}
private void toSlime(CloudTenant tenant, Cursor root) {
var legacyBillingInfo = new BillingInfo("customer", "Vespa");
tenant.creator().ifPresent(creator -> root.setString(creatorField, creator.getName()));
developerKeysToSlime(tenant.developerKeys(), root.setArray(pemDeveloperKeysField));
toSlime(legacyBillingInfo, root.setObject(billingInfoField));
toSlime(tenant.info(), root);
toSlime(tenant.tenantSecretStores(), root);
tenant.archiveAccessRole().ifPresent(role -> root.setString(archiveAccessRoleField, role));
}
private void toSlime(DeletedTenant tenant, Cursor root) {
root.setLong(deletedAtField, tenant.deletedAt().toEpochMilli());
}
private void developerKeysToSlime(BiMap<PublicKey, Principal> keys, Cursor array) {
keys.forEach((key, user) -> {
Cursor object = array.addObject();
object.setString("key", KeyUtils.toPem(key));
object.setString("user", user.getName());
});
}
private void toSlime(BillingInfo billingInfo, Cursor billingInfoObject) {
billingInfoObject.setString(customerIdField, billingInfo.customerId());
billingInfoObject.setString(productCodeField, billingInfo.productCode());
}
private void toSlime(LastLoginInfo lastLoginInfo, Cursor lastLoginInfoObject) {
for (LastLoginInfo.UserLevel userLevel: LastLoginInfo.UserLevel.values()) {
lastLoginInfo.get(userLevel).ifPresent(lastLoginAt ->
lastLoginInfoObject.setLong(valueOf(userLevel), lastLoginAt.toEpochMilli()));
}
}
public Tenant tenantFrom(Slime slime) {
Inspector tenantObject = slime.get();
Tenant.Type type = typeOf(tenantObject.field(typeField).asString());
switch (type) {
case athenz: return athenzTenantFrom(tenantObject);
case cloud: return cloudTenantFrom(tenantObject);
case deleted: return deletedTenantFrom(tenantObject);
default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
}
}
private AthenzTenant athenzTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
AthenzDomain domain = new AthenzDomain(tenantObject.field(athenzDomainField).asString());
Property property = new Property(tenantObject.field(propertyField).asString());
Optional<PropertyId> propertyId = SlimeUtils.optionalString(tenantObject.field(propertyIdField)).map(PropertyId::new);
Optional<Contact> contact = contactFrom(tenantObject.field(contactField));
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
return new AthenzTenant(name, domain, property, propertyId, contact, createdAt, lastLoginInfo);
}
private CloudTenant cloudTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
Optional<Principal> creator = SlimeUtils.optionalString(tenantObject.field(creatorField)).map(SimplePrincipal::new);
BiMap<PublicKey, Principal> developerKeys = developerKeysFromSlime(tenantObject.field(pemDeveloperKeysField));
TenantInfo info = tenantInfoFromSlime(tenantObject.field(tenantInfoField));
List<TenantSecretStore> tenantSecretStores = secretStoresFromSlime(tenantObject.field(secretStoresField));
Optional<String> archiveAccessRole = SlimeUtils.optionalString(tenantObject.field(archiveAccessRoleField));
return new CloudTenant(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccessRole);
}
private DeletedTenant deletedTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
Instant deletedAt = SlimeUtils.instant(tenantObject.field(deletedAtField));
return new DeletedTenant(name, createdAt, deletedAt);
}
private BiMap<PublicKey, Principal> developerKeysFromSlime(Inspector array) {
ImmutableBiMap.Builder<PublicKey, Principal> keys = ImmutableBiMap.builder();
array.traverse((ArrayTraverser) (__, keyObject) ->
keys.put(KeyUtils.fromPemEncodedPublicKey(keyObject.field("key").asString()),
new SimplePrincipal(keyObject.field("user").asString())));
return keys.build();
}
TenantInfo tenantInfoFromSlime(Inspector infoObject) {
if (!infoObject.valid()) return TenantInfo.EMPTY;
return TenantInfo.EMPTY
.withName(infoObject.field("name").asString())
.withEmail(infoObject.field("email").asString())
.withWebsite(infoObject.field("website").asString())
.withContactName(infoObject.field("contactName").asString())
.withContactEmail(infoObject.field("contactEmail").asString())
.withInvoiceEmail(infoObject.field("invoiceEmail").asString())
.withAddress(tenantInfoAddressFromSlime(infoObject.field("address")))
.withBillingContact(tenantInfoBillingContactFromSlime(infoObject.field("billingContact")));
}
/** Deserializes an address; missing fields come back as empty strings from the inspector. */
private TenantInfoAddress tenantInfoAddressFromSlime(Inspector addressObject) {
    TenantInfoAddress address = TenantInfoAddress.EMPTY;
    address = address.withAddressLines(addressObject.field("addressLines").asString());
    address = address.withPostalCodeOrZip(addressObject.field("postalCodeOrZip").asString());
    address = address.withCity(addressObject.field("city").asString());
    address = address.withStateRegionProvince(addressObject.field("stateRegionProvince").asString());
    return address.withCountry(addressObject.field("country").asString());
}
/** Deserializes a billing contact, including its nested address. */
private TenantInfoBillingContact tenantInfoBillingContactFromSlime(Inspector billingObject) {
    TenantInfoBillingContact contact = TenantInfoBillingContact.EMPTY;
    contact = contact.withName(billingObject.field("name").asString());
    contact = contact.withEmail(billingObject.field("email").asString());
    contact = contact.withPhone(billingObject.field("phone").asString());
    return contact.withAddress(tenantInfoAddressFromSlime(billingObject.field("address")));
}
/** Deserializes the tenant secret stores; an invalid (absent) array yields an empty list. */
private List<TenantSecretStore> secretStoresFromSlime(Inspector secretStoresObject) {
    List<TenantSecretStore> stores = new ArrayList<>();
    if ( ! secretStoresObject.valid()) return stores;
    secretStoresObject.traverse((ArrayTraverser) (index, entry) ->
            stores.add(new TenantSecretStore(entry.field(nameField).asString(),
                                             entry.field(awsIdField).asString(),
                                             entry.field(roleField).asString())));
    return stores;
}
/** Deserializes per-user-level last-login timestamps, keyed by the serialized level name. */
private LastLoginInfo lastLoginInfoFromSlime(Inspector lastLoginInfoObject) {
    Map<LastLoginInfo.UserLevel, Instant> lastLogins = new HashMap<>();
    lastLoginInfoObject.traverse((String level, Inspector at) ->
            lastLogins.put(userLevelOf(level), SlimeUtils.instant(at)));
    return new LastLoginInfo(lastLogins);
}
/** Serializes non-empty tenant info under an "info" object; empty info is omitted entirely. */
void toSlime(TenantInfo info, Cursor parentCursor) {
    if (info.isEmpty()) return;
    Cursor object = parentCursor.setObject("info");
    object.setString("name", info.name());
    object.setString("email", info.email());
    object.setString("website", info.website());
    object.setString("invoiceEmail", info.invoiceEmail());
    object.setString("contactName", info.contactName());
    object.setString("contactEmail", info.contactEmail());
    toSlime(info.address(), object);
    toSlime(info.billingContact(), object);
}
/** Serializes a non-empty address under an "address" object; empty addresses are omitted. */
private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor object = parentCursor.setObject("address");
    object.setString("addressLines", address.addressLines());
    object.setString("postalCodeOrZip", address.postalCodeOrZip());
    object.setString("city", address.city());
    object.setString("stateRegionProvince", address.stateRegionProvince());
    object.setString("country", address.country());
}
/** Serializes a non-empty billing contact under a "billingContact" object; empty contacts are omitted. */
private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    // Renamed from the copy-pasted 'addressCursor': this cursor holds the billing contact, not an address.
    Cursor billingCursor = parentCursor.setObject("billingContact");
    billingCursor.setString("name", billingContact.name());
    billingCursor.setString("email", billingContact.email());
    billingCursor.setString("phone", billingContact.phone());
    toSlime(billingContact.address(), billingCursor);
}
/** Serializes the secret stores as an array; nothing is written when the list is empty. */
private void toSlime(List<TenantSecretStore> tenantSecretStores, Cursor parentCursor) {
    if (tenantSecretStores.isEmpty()) return;
    Cursor array = parentCursor.setArray(secretStoresField);
    for (TenantSecretStore store : tenantSecretStores) {
        Cursor entry = array.addObject();
        entry.setString(nameField, store.getName());
        entry.setString(awsIdField, store.getAwsId());
        entry.setString(roleField, store.getRole());
    }
}
/** Deserializes a contact; an invalid (absent) object maps to Optional.empty(). */
private Optional<Contact> contactFrom(Inspector object) {
    if ( ! object.valid()) return Optional.empty();
    Inspector component = object.field(componentField);
    return Optional.of(new Contact(URI.create(object.field(contactUrlField).asString()),
                                   URI.create(object.field(propertyUrlField).asString()),
                                   URI.create(object.field(issueTrackerUrlField).asString()),
                                   personsFrom(object.field(personsField)),
                                   object.field(queueField).asString(),
                                   // 'component' is only present for some contacts
                                   component.valid() ? Optional.of(component.asString()) : Optional.empty()));
}
/** Serializes a contact: its URLs, the nested persons structure, queue and optional component. */
private void writeContact(Contact contact, Cursor contactCursor) {
    contactCursor.setString(contactUrlField, contact.url().toString());
    contactCursor.setString(propertyUrlField, contact.propertyUrl().toString());
    contactCursor.setString(issueTrackerUrlField, contact.issueTrackerUrl().toString());
    // Each person list becomes an array of { person: name } objects inside the outer array.
    Cursor outerArray = contactCursor.setArray(personsField);
    for (List<String> personList : contact.persons()) {
        Cursor innerArray = outerArray.addArray();
        for (String person : personList)
            innerArray.addObject().setString(personField, person);
    }
    contactCursor.setString(queueField, contact.queue());
    contact.component().ifPresent(component -> contactCursor.setString(componentField, component));
}
/** Deserializes the nested persons structure into lists of person names. */
private List<List<String>> personsFrom(Inspector array) {
    List<List<String>> personLists = new ArrayList<>();
    array.traverse((ArrayTraverser) (outerIndex, personArray) -> {
        List<String> persons = new ArrayList<>();
        personArray.traverse((ArrayTraverser) (innerIndex, entry) -> persons.add(entry.field("person").asString()));
        personLists.add(persons);
    });
    return personLists;
}
/** Maps a serialized tenant-type string back to its enum constant; throws on unknown values. */
private static Tenant.Type typeOf(String value) {
    // Switch expression: the file already uses Java 16+ features (pattern instanceof, arrow switch).
    return switch (value) {
        case "athenz" -> Tenant.Type.athenz;
        case "cloud" -> Tenant.Type.cloud;
        case "deleted" -> Tenant.Type.deleted;
        default -> throw new IllegalArgumentException("Unknown tenant type '" + value + "'.");
    };
}
/** Maps a serialized user-level string back to its enum constant; throws on unknown values. */
private static LastLoginInfo.UserLevel userLevelOf(String value) {
    return switch (value) {
        case "user" -> LastLoginInfo.UserLevel.user;
        case "developer" -> LastLoginInfo.UserLevel.developer;
        case "administrator" -> LastLoginInfo.UserLevel.administrator;
        default -> throw new IllegalArgumentException("Unknown user level '" + value + "'.");
    };
}
/** Serializes a user level to its string form (inverse of userLevelOf). */
private static String valueOf(LastLoginInfo.UserLevel userLevel) {
    // Default branch kept so a newly added enum constant fails loudly instead of serializing wrong.
    return switch (userLevel) {
        case user -> "user";
        case developer -> "developer";
        case administrator -> "administrator";
        default -> throw new IllegalArgumentException("Unexpected user level '" + userLevel + "'.");
    };
}
} |
👍 will update. | private HttpResponse userMetadataFromUserId(String email) {
// Looks up a single user by email and renders the same "users" array shape as userMetadataQuery.
var maybeUser = users.findUser(email);
if (maybeUser.isPresent()) {
var user = maybeUser.get();
// Roles are deduplicated via Set.copyOf before rendering.
var roles = users.listRoles(new UserId(user.email()));
var slime = new Slime();
var root = slime.setObject();
var usersRoot = root.setArray("users");
renderUserMetaData(usersRoot.addObject(), user, Set.copyOf(roles));
return new SlimeJsonResponse(slime);
}
// NOTE(review): 404 here differs from userMetadataQuery, which returns an empty "users" array
// for no matches; consider returning the empty-array shape for consistency.
return ErrorResponse.notFoundError("Could not find user: " + email);
} | return ErrorResponse.notFoundError("Could not find user: " + email); | private HttpResponse userMetadataFromUserId(String email) {
// Looks up a single user by email; always responds with a "users" array, which is empty
// when no user matches (consistent with userMetadataQuery) instead of a 404.
var maybeUser = users.findUser(email);
var slime = new Slime();
var root = slime.setObject();
var usersRoot = root.setArray("users");
if (maybeUser.isPresent()) {
var user = maybeUser.get();
// Roles are deduplicated via Set.copyOf before rendering.
var roles = users.listRoles(new UserId(user.email()));
renderUserMetaData(usersRoot.addObject(), user, Set.copyOf(roles));
}
return new SlimeJsonResponse(slime);
} | class UserApiHandler extends ThreadedHttpRequestHandler {
private final static Logger log = Logger.getLogger(UserApiHandler.class.getName());
// User/role management backend.
private final UserManagement users;
private final Controller controller;
private final FlagsDb flagsDb;
// Upper bound on concurrent trial tenants; negative means unlimited (see hasTrialCapacity).
private final IntFlag maxTrialTenants;
@Inject
public UserApiHandler(Context parentCtx, UserManagement users, Controller controller, FlagSource flagSource, FlagsDb flagsDb) {
super(parentCtx);
this.users = users;
this.controller = controller;
this.flagsDb = flagsDb;
this.maxTrialTenants = PermanentFlags.MAX_TRIAL_TENANTS.bindTo(flagSource);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
// Dispatch by HTTP method; per-method handlers do the path routing.
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case POST: return handlePOST(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
// Client errors become 400s with the exception message chain.
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
// Anything else is logged and mapped to an internal error response.
catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
// Routes GET requests under /user/v1 to their handlers; unknown paths yield 404.
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/user/v1/user")) return userMetadata(request);
if (path.matches("/user/v1/find")) return findUser(request);
if (path.matches("/user/v1/tenant/{tenant}")) return listTenantRoleMembers(path.get("tenant"));
if (path.matches("/user/v1/tenant/{tenant}/application/{application}")) return listApplicationRoleMembers(path.get("tenant"), path.get("application"));
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
// POST adds a user to one or more tenant roles.
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/user/v1/tenant/{tenant}")) return addTenantRoleMember(path.get("tenant"), request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
// DELETE removes a user from one or more tenant roles.
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/user/v1/tenant/{tenant}")) return removeTenantRoleMember(path.get("tenant"), request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
// CORS preflight support; advertises the supported methods.
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
// Role definitions rendered in the "operator" section of user metadata.
private static final Set<RoleDefinition> hostedOperators = Set.of(
RoleDefinition.hostedOperator,
RoleDefinition.hostedSupporter,
RoleDefinition.hostedAccountant);
// Looks up users either by exact email or by a free-text query; exactly one parameter is required.
private HttpResponse findUser(HttpRequest request) {
var email = request.getProperty("email");
var query = request.getProperty("query");
if (email != null) return userMetadataFromUserId(email);
if (query != null) return userMetadataQuery(query);
return ErrorResponse.badRequest("Need 'email' or 'query' parameter");
}
// Renders metadata for every user matching the query as a "users" array (empty when none match).
private HttpResponse userMetadataQuery(String query) {
var userList = users.findUsers(query);
var slime = new Slime();
var root = slime.setObject();
var userSlime = root.setArray("users");
for (var user : userList) {
var roles = users.listRoles(new UserId((user.email())));
renderUserMetaData(userSlime.addObject(), user, Set.copyOf(roles));
}
return new SlimeJsonResponse(slime);
}
// Renders metadata for the authenticated user taken from the request context.
private HttpResponse userMetadata(HttpRequest request) {
User user;
// The context attribute is either a User object or a map of its fields, depending on the filter chain.
if (request.getJDiscRequest().context().get(User.ATTRIBUTE_NAME) instanceof User) {
user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
} else {
@SuppressWarnings("unchecked")
Map<String, String> attr = (Map<String, String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class);
user = new User(attr.get("email"), attr.get("name"), attr.get("nickname"), attr.get("picture"));
}
Set<Role> roles = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class).roles();
var slime = new Slime();
renderUserMetaData(slime.setObject(), user, roles);
return new SlimeJsonResponse(slime);
}
// Renders the full user-metadata payload: system flags, user fields, per-tenant roles,
// operator roles, and feature-flag data.
private void renderUserMetaData(Cursor root, User user, Set<Role> roles) {
// Group the user's tenant roles by tenant, expanding aggregate roles via filterTenantRoles.
Map<TenantName, List<TenantRole>> tenantRolesByTenantName = roles.stream()
.flatMap(role -> filterTenantRoles(role).stream())
.distinct()
.sorted(Comparator.comparing(Role::definition).reversed())
.collect(Collectors.groupingBy(TenantRole::tenant, Collectors.toList()));
List<Role> operatorRoles = roles.stream()
.filter(role -> hostedOperators.contains(role.definition()))
.sorted(Comparator.comparing(Role::definition))
.toList();
root.setBool("isPublic", controller.system().isPublic());
root.setBool("isCd", controller.system().isCd());
root.setBool("hasTrialCapacity", hasTrialCapacity());
toSlime(root.setObject("user"), user);
// One object per tenant, sorted by tenant name, listing that tenant's roles.
Cursor tenants = root.setObject("tenants");
tenantRolesByTenantName.keySet().stream()
.sorted()
.forEach(tenant -> {
Cursor tenantObject = tenants.setObject(tenant.value());
tenantObject.setBool("supported", hasSupportedPlan(tenant));
Cursor tenantRolesObject = tenantObject.setArray("roles");
tenantRolesByTenantName.getOrDefault(tenant, List.of())
.forEach(role -> tenantRolesObject.addString(role.definition().name()));
});
// The "operator" array is only present for users holding operator roles.
if (!operatorRoles.isEmpty()) {
Cursor operator = root.setArray("operator");
operatorRoles.forEach(role -> operator.addString(role.definition().name()));
}
UserFlagsSerializer.toSlime(root, flagsDb.getAllFlagData(), tenantRolesByTenantName.keySet(), !operatorRoles.isEmpty(), user.email());
}
// Lists members of every role of the given tenant.
private HttpResponse listTenantRoleMembers(String tenantName) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("tenant", tenantName);
fillRoles(root,
Roles.tenantRoles(TenantName.from(tenantName)),
Collections.emptyList());
return new SlimeJsonResponse(slime);
}
// Lists members of every role of the given application; tenant roles are passed as
// super roles so implied application memberships are reported too.
private HttpResponse listApplicationRoleMembers(String tenantName, String applicationName) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("tenant", tenantName);
root.setString("application", applicationName);
fillRoles(root,
Roles.applicationRoles(TenantName.from(tenantName), ApplicationName.from(applicationName)),
Roles.tenantRoles(TenantName.from(tenantName)));
return new SlimeJsonResponse(slime);
}
/**
 * Renders the role names and, for each user holding any of the given roles (or super roles),
 * whether membership in each role is explicit and/or implied.
 */
private void fillRoles(Cursor root, List<? extends Role> roles, List<? extends Role> superRoles) {
    Cursor rolesArray = root.setArray("roleNames");
    for (Role role : roles)
        rolesArray.addString(valueOf(role));
    // LinkedHashMap preserves the order users are first encountered in.
    Map<User, List<Role>> memberships = new LinkedHashMap<>();
    List<Role> allRoles = new ArrayList<>(superRoles);
    allRoles.addAll(roles);
    for (Role role : allRoles)
        for (User user : users.listUsers(role))
            // computeIfAbsent replaces the putIfAbsent + get pair: one lookup, same behavior.
            memberships.computeIfAbsent(user, ignored -> new ArrayList<>()).add(role);
    Cursor usersArray = root.setArray("users");
    memberships.forEach((user, userRoles) -> {
        Cursor userObject = usersArray.addObject();
        toSlime(userObject, user);
        Cursor rolesObject = userObject.setObject("roles");
        for (Role role : roles) {
            Cursor roleObject = rolesObject.setObject(valueOf(role));
            roleObject.setBool("explicit", userRoles.contains(role));
            roleObject.setBool("implied", userRoles.stream().anyMatch(userRole -> userRole.implies(role)));
        }
    });
}
// Serializes a user; optional fields (name, nickname, picture, lastLogin, loginCount) are
// only written when present.
private static void toSlime(Cursor userObject, User user) {
if (user.name() != null) userObject.setString("name", user.name());
userObject.setString("email", user.email());
if (user.nickname() != null) userObject.setString("nickname", user.nickname());
if (user.picture() != null) userObject.setString("picture", user.picture());
userObject.setBool("verified", user.isVerified());
if (!user.lastLogin().equals(User.NO_DATE))
userObject.setString("lastLogin", user.lastLogin().format(DateTimeFormatter.ISO_DATE));
if (user.loginCount() > -1)
userObject.setLong("loginCount", user.loginCount());
}
// Adds the user named in the request body to the given tenant roles.
private HttpResponse addTenantRoleMember(String tenantName, HttpRequest request) {
Inspector requestObject = bodyInspector(request);
var tenant = TenantName.from(tenantName);
var user = new UserId(require("user", Inspector::asString, requestObject));
var roles = SlimeStream.fromArray(requestObject.field("roles"), Inspector::asString)
.map(roleName -> Roles.toRole(tenant, roleName))
.toList();
users.addToRoles(user, roles);
return new MessageResponse(user + " is now a member of " + roles.stream().map(Role::toString).collect(Collectors.joining(", ")));
}
// Removes the user named in the request body from the given tenant roles.
private HttpResponse removeTenantRoleMember(String tenantName, HttpRequest request) {
Inspector requestObject = bodyInspector(request);
var tenant = TenantName.from(tenantName);
var user = new UserId(require("user", Inspector::asString, requestObject));
var roles = SlimeStream.fromArray(requestObject.field("roles"), Inspector::asString)
.map(roleName -> Roles.toRole(tenant, roleName))
.toList();
// Refuses to drop the last administrator, and cleans up the user's developer key if any.
enforceLastAdminOfTenant(tenant, user, roles);
removeDeveloperKey(tenant, user, roles);
users.removeFromRoles(user, roles);
// Invalidate existing user sessions for cloud tenants so revoked access takes effect immediately.
controller.tenants().lockIfPresent(tenant, LockedTenant.class, lockedTenant -> {
if (lockedTenant instanceof LockedTenant.Cloud cloudTenant)
controller.tenants().store(cloudTenant.withInvalidateUserSessionsBefore(controller.clock().instant()));
});
return new MessageResponse(user + " is no longer a member of " + roles.stream().map(Role::toString).collect(Collectors.joining(", ")));
}
// Throws if removing these roles would leave the tenant without any administrator,
// i.e. when the user is the sole member of the administrator role.
private void enforceLastAdminOfTenant(TenantName tenantName, UserId user, List<Role> roles) {
for (Role role : roles) {
if (role.definition().equals(RoleDefinition.administrator)) {
if (Set.of(user.value()).equals(users.listUsers(role).stream().map(User::email).collect(Collectors.toSet()))) {
throw new IllegalArgumentException("Can't remove the last administrator of a tenant.");
}
break;
}
}
}
// When the developer role is being removed, also deletes the user's registered developer key, if any.
private void removeDeveloperKey(TenantName tenantName, UserId user, List<Role> roles) {
for (Role role : roles) {
if (role.definition().equals(RoleDefinition.developer)) {
controller.tenants().lockIfPresent(tenantName, LockedTenant.Cloud.class, tenant -> {
PublicKey key = tenant.get().developerKeys().inverse().get(new SimplePrincipal(user.value()));
if (key != null)
controller.tenants().store(tenant.withoutDeveloperKey(key));
});
break;
}
}
}
/** Returns whether another trial tenant may be created; non-public systems have no limit. */
private boolean hasTrialCapacity() {
    if (! controller.system().isPublic()) return true;
    // .toList() for consistency with the rest of this class (see operatorRoles in renderUserMetaData).
    var existing = controller.tenants().asList().stream().map(Tenant::name).toList();
    var trialTenants = controller.serviceRegistry().billingController().tenantsWithPlan(existing, PlanId.from("trial"));
    // A negative flag value means unlimited trial tenants.
    return maxTrialTenants.value() < 0 || trialTenants.size() < maxTrialTenants.value();
}
/** Parses the request body (at most 1 KiB) as slime. */
private static Inspector bodyInspector(HttpRequest request) {
    return Exceptions.uncheck(() -> SlimeUtils.jsonToSlime(IOUtils.readBytes(request.getData(), 1 << 10)).get());
}
/** Returns the mapped value of the named field, or throws IllegalArgumentException if it is missing. */
private static <Type> Type require(String name, Function<Inspector, Type> mapper, Inspector object) {
    // Look the field up once instead of three times.
    Inspector field = object.field(name);
    if ( ! field.valid()) throw new IllegalArgumentException("Missing field '" + name + "'.");
    return mapper.apply(field);
}
/** Serializes a role definition to its external string form; throws on unsupported definitions. */
private static String valueOf(Role role) {
    // Switch expression, consistent with the arrow switch already used in filterTenantRoles.
    return switch (role.definition()) {
        case administrator -> "administrator";
        case developer -> "developer";
        case reader -> "reader";
        case headless -> "headless";
        default -> throw new IllegalArgumentException("Unexpected role type '" + role.definition() + "'.");
    };
}
// Expands a role into the tenant roles it represents: concrete tenant roles map to themselves,
// athenzTenantAdmin expands to all tenant roles, anything else to the empty set.
private static Collection<TenantRole> filterTenantRoles(Role role) {
if (role instanceof TenantRole tenantRole) {
switch (tenantRole.definition()) {
case administrator, developer, reader, hostedDeveloper: return Set.of(tenantRole);
case athenzTenantAdmin: return Roles.tenantRoles(tenantRole.tenant());
}
}
return Set.of();
}
// Fetches a typed attribute from the request context, throwing if absent or of the wrong type.
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> clazz) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(clazz::isInstance)
.map(clazz::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
// Returns whether the tenant's billing plan is marked as supported; unknown plans count as unsupported.
private boolean hasSupportedPlan(TenantName tenantName) {
var planId = controller.serviceRegistry().billingController().getPlan(tenantName);
return controller.serviceRegistry().planRegistry().plan(planId)
.map(Plan::isSupported)
.orElse(false);
}
} | class UserApiHandler extends ThreadedHttpRequestHandler {
private final static Logger log = Logger.getLogger(UserApiHandler.class.getName());
private final UserManagement users;
private final Controller controller;
private final FlagsDb flagsDb;
private final IntFlag maxTrialTenants;
@Inject
public UserApiHandler(Context parentCtx, UserManagement users, Controller controller, FlagSource flagSource, FlagsDb flagsDb) {
super(parentCtx);
this.users = users;
this.controller = controller;
this.flagsDb = flagsDb;
this.maxTrialTenants = PermanentFlags.MAX_TRIAL_TENANTS.bindTo(flagSource);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case POST: return handlePOST(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/user/v1/user")) return userMetadata(request);
if (path.matches("/user/v1/find")) return findUser(request);
if (path.matches("/user/v1/tenant/{tenant}")) return listTenantRoleMembers(path.get("tenant"));
if (path.matches("/user/v1/tenant/{tenant}/application/{application}")) return listApplicationRoleMembers(path.get("tenant"), path.get("application"));
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/user/v1/tenant/{tenant}")) return addTenantRoleMember(path.get("tenant"), request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/user/v1/tenant/{tenant}")) return removeTenantRoleMember(path.get("tenant"), request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private static final Set<RoleDefinition> hostedOperators = Set.of(
RoleDefinition.hostedOperator,
RoleDefinition.hostedSupporter,
RoleDefinition.hostedAccountant);
private HttpResponse findUser(HttpRequest request) {
var email = request.getProperty("email");
var query = request.getProperty("query");
if (email != null) return userMetadataFromUserId(email);
if (query != null) return userMetadataQuery(query);
return ErrorResponse.badRequest("Need 'email' or 'query' parameter");
}
private HttpResponse userMetadataQuery(String query) {
var userList = users.findUsers(query);
var slime = new Slime();
var root = slime.setObject();
var userSlime = root.setArray("users");
for (var user : userList) {
var roles = users.listRoles(new UserId((user.email())));
renderUserMetaData(userSlime.addObject(), user, Set.copyOf(roles));
}
return new SlimeJsonResponse(slime);
}
private HttpResponse userMetadata(HttpRequest request) {
User user;
if (request.getJDiscRequest().context().get(User.ATTRIBUTE_NAME) instanceof User) {
user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
} else {
@SuppressWarnings("unchecked")
Map<String, String> attr = (Map<String, String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class);
user = new User(attr.get("email"), attr.get("name"), attr.get("nickname"), attr.get("picture"));
}
Set<Role> roles = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class).roles();
var slime = new Slime();
renderUserMetaData(slime.setObject(), user, roles);
return new SlimeJsonResponse(slime);
}
private void renderUserMetaData(Cursor root, User user, Set<Role> roles) {
Map<TenantName, List<TenantRole>> tenantRolesByTenantName = roles.stream()
.flatMap(role -> filterTenantRoles(role).stream())
.distinct()
.sorted(Comparator.comparing(Role::definition).reversed())
.collect(Collectors.groupingBy(TenantRole::tenant, Collectors.toList()));
List<Role> operatorRoles = roles.stream()
.filter(role -> hostedOperators.contains(role.definition()))
.sorted(Comparator.comparing(Role::definition))
.toList();
root.setBool("isPublic", controller.system().isPublic());
root.setBool("isCd", controller.system().isCd());
root.setBool("hasTrialCapacity", hasTrialCapacity());
toSlime(root.setObject("user"), user);
Cursor tenants = root.setObject("tenants");
tenantRolesByTenantName.keySet().stream()
.sorted()
.forEach(tenant -> {
Cursor tenantObject = tenants.setObject(tenant.value());
tenantObject.setBool("supported", hasSupportedPlan(tenant));
Cursor tenantRolesObject = tenantObject.setArray("roles");
tenantRolesByTenantName.getOrDefault(tenant, List.of())
.forEach(role -> tenantRolesObject.addString(role.definition().name()));
});
if (!operatorRoles.isEmpty()) {
Cursor operator = root.setArray("operator");
operatorRoles.forEach(role -> operator.addString(role.definition().name()));
}
UserFlagsSerializer.toSlime(root, flagsDb.getAllFlagData(), tenantRolesByTenantName.keySet(), !operatorRoles.isEmpty(), user.email());
}
private HttpResponse listTenantRoleMembers(String tenantName) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("tenant", tenantName);
fillRoles(root,
Roles.tenantRoles(TenantName.from(tenantName)),
Collections.emptyList());
return new SlimeJsonResponse(slime);
}
private HttpResponse listApplicationRoleMembers(String tenantName, String applicationName) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("tenant", tenantName);
root.setString("application", applicationName);
fillRoles(root,
Roles.applicationRoles(TenantName.from(tenantName), ApplicationName.from(applicationName)),
Roles.tenantRoles(TenantName.from(tenantName)));
return new SlimeJsonResponse(slime);
}
private void fillRoles(Cursor root, List<? extends Role> roles, List<? extends Role> superRoles) {
Cursor rolesArray = root.setArray("roleNames");
for (Role role : roles)
rolesArray.addString(valueOf(role));
Map<User, List<Role>> memberships = new LinkedHashMap<>();
List<Role> allRoles = new ArrayList<>(superRoles);
allRoles.addAll(roles);
for (Role role : allRoles)
for (User user : users.listUsers(role)) {
memberships.putIfAbsent(user, new ArrayList<>());
memberships.get(user).add(role);
}
Cursor usersArray = root.setArray("users");
memberships.forEach((user, userRoles) -> {
Cursor userObject = usersArray.addObject();
toSlime(userObject, user);
Cursor rolesObject = userObject.setObject("roles");
for (Role role : roles) {
Cursor roleObject = rolesObject.setObject(valueOf(role));
roleObject.setBool("explicit", userRoles.contains(role));
roleObject.setBool("implied", userRoles.stream().anyMatch(userRole -> userRole.implies(role)));
}
});
}
private static void toSlime(Cursor userObject, User user) {
if (user.name() != null) userObject.setString("name", user.name());
userObject.setString("email", user.email());
if (user.nickname() != null) userObject.setString("nickname", user.nickname());
if (user.picture() != null) userObject.setString("picture", user.picture());
userObject.setBool("verified", user.isVerified());
if (!user.lastLogin().equals(User.NO_DATE))
userObject.setString("lastLogin", user.lastLogin().format(DateTimeFormatter.ISO_DATE));
if (user.loginCount() > -1)
userObject.setLong("loginCount", user.loginCount());
}
private HttpResponse addTenantRoleMember(String tenantName, HttpRequest request) {
Inspector requestObject = bodyInspector(request);
var tenant = TenantName.from(tenantName);
var user = new UserId(require("user", Inspector::asString, requestObject));
var roles = SlimeStream.fromArray(requestObject.field("roles"), Inspector::asString)
.map(roleName -> Roles.toRole(tenant, roleName))
.toList();
users.addToRoles(user, roles);
return new MessageResponse(user + " is now a member of " + roles.stream().map(Role::toString).collect(Collectors.joining(", ")));
}
private HttpResponse removeTenantRoleMember(String tenantName, HttpRequest request) {
Inspector requestObject = bodyInspector(request);
var tenant = TenantName.from(tenantName);
var user = new UserId(require("user", Inspector::asString, requestObject));
var roles = SlimeStream.fromArray(requestObject.field("roles"), Inspector::asString)
.map(roleName -> Roles.toRole(tenant, roleName))
.toList();
enforceLastAdminOfTenant(tenant, user, roles);
removeDeveloperKey(tenant, user, roles);
users.removeFromRoles(user, roles);
controller.tenants().lockIfPresent(tenant, LockedTenant.class, lockedTenant -> {
if (lockedTenant instanceof LockedTenant.Cloud cloudTenant)
controller.tenants().store(cloudTenant.withInvalidateUserSessionsBefore(controller.clock().instant()));
});
return new MessageResponse(user + " is no longer a member of " + roles.stream().map(Role::toString).collect(Collectors.joining(", ")));
}
private void enforceLastAdminOfTenant(TenantName tenantName, UserId user, List<Role> roles) {
for (Role role : roles) {
if (role.definition().equals(RoleDefinition.administrator)) {
if (Set.of(user.value()).equals(users.listUsers(role).stream().map(User::email).collect(Collectors.toSet()))) {
throw new IllegalArgumentException("Can't remove the last administrator of a tenant.");
}
break;
}
}
}
private void removeDeveloperKey(TenantName tenantName, UserId user, List<Role> roles) {
for (Role role : roles) {
if (role.definition().equals(RoleDefinition.developer)) {
controller.tenants().lockIfPresent(tenantName, LockedTenant.Cloud.class, tenant -> {
PublicKey key = tenant.get().developerKeys().inverse().get(new SimplePrincipal(user.value()));
if (key != null)
controller.tenants().store(tenant.withoutDeveloperKey(key));
});
break;
}
}
}
private boolean hasTrialCapacity() {
if (! controller.system().isPublic()) return true;
var existing = controller.tenants().asList().stream().map(Tenant::name).collect(Collectors.toList());
var trialTenants = controller.serviceRegistry().billingController().tenantsWithPlan(existing, PlanId.from("trial"));
return maxTrialTenants.value() < 0 || trialTenants.size() < maxTrialTenants.value();
}
private static Inspector bodyInspector(HttpRequest request) {
return Exceptions.uncheck(() -> SlimeUtils.jsonToSlime(IOUtils.readBytes(request.getData(), 1 << 10)).get());
}
private static <Type> Type require(String name, Function<Inspector, Type> mapper, Inspector object) {
if ( ! object.field(name).valid()) throw new IllegalArgumentException("Missing field '" + name + "'.");
return mapper.apply(object.field(name));
}
private static String valueOf(Role role) {
switch (role.definition()) {
case administrator: return "administrator";
case developer: return "developer";
case reader: return "reader";
case headless: return "headless";
default: throw new IllegalArgumentException("Unexpected role type '" + role.definition() + "'.");
}
}
private static Collection<TenantRole> filterTenantRoles(Role role) {
if (role instanceof TenantRole tenantRole) {
switch (tenantRole.definition()) {
case administrator, developer, reader, hostedDeveloper: return Set.of(tenantRole);
case athenzTenantAdmin: return Roles.tenantRoles(tenantRole.tenant());
}
}
return Set.of();
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> clazz) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(clazz::isInstance)
.map(clazz::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
private boolean hasSupportedPlan(TenantName tenantName) {
var planId = controller.serviceRegistry().billingController().getPlan(tenantName);
return controller.serviceRegistry().planRegistry().plan(planId)
.map(Plan::isSupported)
.orElse(false);
}
} |
Since this is generated from a `Set`, the unpredictable iteration order may result in an unstable test. | public void trusted_certificates_patch() throws IOException {
String url = "http:
// Initially the node has no trusted certificates.
tester.assertPartialResponse(new Request(url), "\"trustStore\":[]", true);
String trustStore = "\"trustStore\":[" +
"{" +
"\"fingerprint\":\"foo\"," +
"\"expiry\":1632302251000" +
"}," +
"{" +
"\"fingerprint\":\"bar\"," +
"\"expiry\":1758532706000" +
"}" +
"]";
// PATCH the two-entry trust store onto the node, then read it back.
assertResponse(new Request(url, Utf8.toBytes("{"+trustStore+"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
// NOTE(review): the server serializes the trust store from a Set, so the order of the two
// entries in the response may be unstable — TODO confirm this comparison tolerates reordering.
tester.assertPartialResponse(new Request(url), trustStore, true);
} | String trustStore = "\"trustStore\":[" + | public void trusted_certificates_patch() throws IOException {
String url = "http:
// Initially the node has no trusted certificates.
tester.assertPartialResponse(new Request(url), "\"trustStore\":[]", false);
String trustStore = "\"trustStore\":[" +
"{" +
"\"fingerprint\":\"foo\"," +
"\"expiry\":1632302251000" +
"}," +
"{" +
"\"fingerprint\":\"bar\"," +
"\"expiry\":1758532706000" +
"}" +
"]";
// PATCH the two-entry trust store onto the node, then read it back.
assertResponse(new Request(url, Utf8.toBytes("{"+trustStore+"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
// NOTE(review): the server serializes the trust store from a Set, so the order of the two
// entries in the response may be unstable — TODO confirm this comparison tolerates reordering.
tester.assertPartialResponse(new Request(url), trustStore, true);
} | class NodesV2ApiTest {
private RestApiTester tester;
@Before
public void createTester() {
tester = new RestApiTester();
}
@After
public void closeTester() {
tester.close();
}
/** This test gives examples of the node requests that can be made to nodes/v2 */
@Test
public void test_requests() throws Exception {
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertRestart(1, new Request("http:
new byte[0], Request.Method.POST));
assertRestart(2, new Request("http:
new byte[0], Request.Method.POST));
assertRestart(13, new Request("http:
new byte[0], Request.Method.POST));
tester.assertResponseContains(new Request("http:
"\"restartGeneration\":3");
assertReboot(14, new Request("http:
new byte[0], Request.Method.POST));
assertReboot(2, new Request("http:
new byte[0], Request.Method.POST));
assertReboot(19, new Request("http:
new byte[0], Request.Method.POST));
tester.assertResponseContains(new Request("http:
"\"rebootGeneration\":3");
assertResponse(new Request("http:
("[" + asNodeJson("host8.yahoo.com", "default", "127.0.8.1") + "," +
asHostJson("host9.yahoo.com", "large-variant", List.of("node9-1.yahoo.com"), "127.0.9.1", "::9:1") + "," +
asNodeJson("parent2.yahoo.com", NodeType.host, "large-variant", Optional.of(TenantName.from("myTenant")),
Optional.of(ApplicationId.from("tenant1", "app1", "instance1")), Optional.empty(), List.of(), "127.0.127.1", "::127:1") + "," +
asDockerNodeJson("host11.yahoo.com", "parent.host.yahoo.com", "::11") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 4 nodes to the provisioned state\"}");
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
tester.assertResponse(new Request("http:
("[" + asNodeJson("host8.yahoo.com", "default", "127.0.254.8") + "]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot add provisioned host host8.yahoo.com: A node with this name already exists\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
"{\"message\":\"Removed host9.yahoo.com\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host8.yahoo.com to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host8.yahoo.com to ready\"}");
tester.assertResponseContains(new Request("http:
"\"state\":\"ready\"");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host8.yahoo.com to ready\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to failed and marked none as wantToFail\"}");
tester.assertResponseContains(new Request("http:
"\"state\":\"failed\"");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to active\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
"{\"message\":\"Removed host8.yahoo.com\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved test-node-pool-102-2 to failed and marked none as wantToFail\"}");
tester.assertResponseContains(new Request("http:
"\"state\":\"failed\"");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved test-node-pool-102-2 to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved test-node-pool-102-2 to ready\"}");
tester.assertResponse(new Request("http:
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'test-node-pool-102-2'\"}");
assertResponse(new Request("http:
"{\"message\":\"Moved none to failed and marked dockerhost1.yahoo.com, host4.yahoo.com as wantToFail\"}");
assertResponse(new Request("http:
"{\"url\":\"http:
assertResponse(new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentRebootGeneration\": 1}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"flavor\": \"d-2-8-100\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentVespaVersion\": \"5.104.142\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"parentHostname\": \"parent.yahoo.com\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"ipAddresses\": [\"127.0.0.1\",\"::1\"]}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToRetire\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentVespaVersion\": \"6.43.0\",\"currentDockerImage\": \"docker-registry.domain.tld:8080/dist/vespa:6.45.0\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"openStackId\": \"patched-openstackid\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"modelName\": \"foo\"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToRetire\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToDeprovision\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToDeprovision\": false, \"wantToRetire\": false}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToDeprovision\": true, \"wantToRetire\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost2.yahoo.com\"}");
tester.assertResponseContains(new Request("http:
"\"wantToRetire\":true,\"preferToRetire\":false,\"wantToDeprovision\":false,");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToRebuild\": true, \"wantToRetire\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertResponseContains(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"modelName\": null}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertPartialResponse(new Request("http:
tester.container().handleRequest((new Request("http:
((OrchestratorMock) tester.container().components().getComponent(OrchestratorMock.class.getName()))
.suspend(new HostName("host4.yahoo.com"));
assertFile(new Request("http:
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved dockerhost1.yahoo.com to parked\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
"{\"message\":\"Removed dockerhost1.yahoo.com\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"deprovisioned host dockerhost1.yahoo.com is rebuilding and cannot be forgotten\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToRebuild\": false}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
"{\"message\":\"Permanently removed dockerhost1.yahoo.com\"}");
}
@Test
public void test_application_requests() throws Exception {
assertFile(new Request("http:
assertFile(new Request("http:
"application1.json");
assertFile(new Request("http:
"application2.json");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentReadShare\": 0.3, " +
"\"maxReadShare\": 0.5 }"), Request.Method.PATCH),
"{\"message\":\"Updated application 'tenant1.application1.instance1'\"}");
}
@Test
public void post_with_patch_method_override_in_header_is_handled_as_patch() throws Exception {
Request req = new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"), Request.Method.POST);
req.getHeaders().add("X-HTTP-Method-Override", "PATCH");
assertResponse(req, "{\"message\":\"Updated host4.yahoo.com\"}");
}
@Test
public void post_with_invalid_method_override_in_header_gives_sane_error_message() throws Exception {
Request req = new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"), Request.Method.POST);
req.getHeaders().add("X-HTTP-Method-Override", "GET");
tester.assertResponse(req, 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Illegal X-HTTP-Method-Override header for POST request. Accepts 'PATCH' but got 'GET'\"}");
}
@Test
public void post_node_with_ip_address() throws Exception {
assertResponse(new Request("http:
("[" + asNodeJson("ipv4-host.yahoo.com", "default","127.0.0.1") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertResponse(new Request("http:
("[" + asNodeJson("ipv6-host.yahoo.com", "default", "::1") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertResponse(new Request("http:
("[" + asNodeJson("dual-stack-host.yahoo.com", "default", "127.0.254.254", "::254:254") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
}
@Test
public void post_node_with_duplicate_ip_address() throws Exception {
Request req = new Request("http:
("[" + asNodeJson("host-with-ip.yahoo.com", "default", "foo") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST);
tester.assertResponse(req, 400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid IP address 'foo': 'foo' is not an IP string literal.\"}");
tester.assertResponse(new Request("http:
"[" + asNodeJson("tenant-node-foo.yahoo.com", "default", "127.0.1.1") + "]",
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot assign [127.0.1.1] to tenant-node-foo.yahoo.com: [127.0.1.1] already assigned to host1.yahoo.com\"}");
tester.assertResponse(new Request("http:
"{\"ipAddresses\": [\"127.0.2.1\"]}",
Request.Method.PATCH), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'ipAddresses': Cannot assign [127.0.2.1] to test-node-pool-102-2: [127.0.2.1] already assigned to host2.yahoo.com\"}");
tester.assertResponse(new Request("http:
"[" + asHostJson("host200.yahoo.com", "default", List.of(), "127.0.2.1") + "]",
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot assign [127.0.2.1] to host200.yahoo.com: [127.0.2.1] already assigned to host2.yahoo.com\"}");
tester.assertResponse(new Request("http:
"{\"ipAddresses\": [\"::104:3\"]}",
Request.Method.PATCH), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'ipAddresses': Cannot assign [::100:4, ::100:3, ::100:2, ::104:3] to dockerhost1.yahoo.com: [::104:3] already assigned to dockerhost5.yahoo.com\"}");
tester.assertResponse(new Request("http:
"[" + asNodeJson("cfghost42.yahoo.com", NodeType.confighost, "default", Optional.empty(), Optional.empty(), Optional.empty(), List.of(), "127.0.42.1") + "]",
Request.Method.POST), 200,
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponse(new Request("http:
"[" + asDockerNodeJson("cfg42.yahoo.com", NodeType.config, "cfghost42.yahoo.com", "127.0.42.1") + "]",
Request.Method.POST), 200,
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponse(new Request("http:
"[" + asDockerNodeJson("proxy42.yahoo.com", NodeType.proxy, "cfghost42.yahoo.com", "127.0.42.1") + "]",
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot assign [127.0.42.1] to proxy42.yahoo.com: [127.0.42.1] already assigned to cfg42.yahoo.com\"}");
tester.assertResponse(new Request("http:
"[" + asNodeJson("cfghost43.yahoo.com", NodeType.confighost, "default", Optional.empty(), Optional.empty(), Optional.empty(), List.of(), "127.0.43.1") + "]",
Request.Method.POST), 200,
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponse(new Request("http:
"{\"ipAddresses\": [\"127.0.43.1\"]}",
Request.Method.PATCH), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'ipAddresses': Cannot assign [127.0.43.1] to cfg42.yahoo.com: [127.0.43.1] already assigned to cfghost43.yahoo.com\"}");
}
@Test
public void patch_hostnames() throws IOException {
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"additionalHostnames\": [\"a\",\"b\"]}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"additionalHostnames\": []}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertFile(new Request("http:
}
@Test
public void post_controller_node() throws Exception {
String data = "[{\"hostname\":\"controller1.yahoo.com\", \"openStackId\":\"fake-controller1.yahoo.com\"," +
createIpAddresses("127.0.0.1") +
"\"flavor\":\"default\"" +
", \"type\":\"controller\"}]";
assertResponse(new Request("http:
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertFile(new Request("http:
}
@Test
public void fails_to_ready_node_with_hard_fail() throws Exception {
assertResponse(new Request("http:
("[" + asHostJson("host12.yahoo.com", "default", List.of()) + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
String msg = "Actual disk space (2TB) differs from spec (3TB)";
assertResponse(new Request("http:
Utf8.toBytes("{\"reports\":{\"diskSpace\":{\"createdMillis\":2,\"description\":\"" + msg + "\",\"type\": \"HARD_FAIL\"}}}"),
Request.Method.PATCH),
"{\"message\":\"Updated host12.yahoo.com\"}");
tester.assertResponse(new Request("http:
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"provisioned host host12.yahoo.com cannot be readied because it has " +
"hard failures: [diskSpace reported 1970-01-01T00:00:00.002Z: " + msg + "]\"}");
}
@Test
public void patching_dirty_node_does_not_increase_reboot_generation() throws Exception {
assertResponse(new Request("http:
("[" + asNodeJson("foo.yahoo.com", "default") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to failed and marked none as wantToFail\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to dirty\"}");
tester.assertResponseContains(new Request("http:
"\"rebootGeneration\":0");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentRebootGeneration\": 42}"), Request.Method.PATCH),
"{\"message\":\"Updated foo.yahoo.com\"}");
tester.assertResponseContains(new Request("http:
"\"rebootGeneration\":0");
}
@Test
public void acl_request_by_tenant_node() throws Exception {
String hostname = "foo.yahoo.com";
assertResponse(new Request("http:
("[" + asNodeJson(hostname, "default", "127.0.222.1") + "]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to ready\"}");
assertFile(new Request("http:
}
@Test
public void acl_request_by_config_server() throws Exception {
assertFile(new Request("http:
}
@Test
public void test_invalid_requests() throws Exception {
tester.assertResponse(new Request("http:
new byte[0], Request.Method.GET),
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'node-does-not-exist'\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'node-does-not-exist'\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host1.yahoo.com to failed and marked none as wantToFail\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot make failed host host1.yahoo.com allocated to tenant1.application1.instance1 as 'container/id1/0/0' available for new allocation as it is not in state [dirty]\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host1.yahoo.com to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host1.yahoo.com to ready\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to parked\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot make parked host host2.yahoo.com allocated to tenant2.application2.instance2 as 'content/id2/0/0/stateful' available for new allocation as it is not in state [dirty]\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to ready\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'host2.yahoo.com'\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"active child node host4.yahoo.com allocated to tenant3.application3.instance3 as 'content/id3/0/0/stateful' is currently allocated and cannot be removed while in active\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": \"1\"}"), Request.Method.PATCH),
400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'currentRestartGeneration': Expected a LONG value, got a STRING\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"flavor\": 1}"), Request.Method.PATCH),
400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'flavor': Expected a STRING value, got a LONG\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.PUT), 404,
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'host2.yahoo.com'\"}");
tester.assertResponse(new Request("http:
("[" + asNodeJson("host8.yahoo.com", "default", "127.0.254.1", "::254:1") + "," +
asNodeJson("host8.yahoo.com", "large-variant", "127.0.253.1", "::253:1") + "]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot add nodes: provisioned host host8.yahoo.com is duplicated in the argument list\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"modelName\": \"foo\"}"), Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'modelName': A child node cannot have model name set\"}");
}
@Test
public void test_node_patching() throws Exception {
assertResponse(new Request("http:
Utf8.toBytes("{" +
"\"currentRestartGeneration\": 1," +
"\"currentRebootGeneration\": 3," +
"\"flavor\": \"medium-disk\"," +
"\"currentVespaVersion\": \"5.104.142\"," +
"\"failCount\": 0," +
"\"parentHostname\": \"parent.yahoo.com\"" +
"}"
),
Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"),
Request.Method.PATCH),
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node found with hostname doesnotexist.yahoo.com\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'currentRestartGeneration': Node is not allocated\"}");
}
@Test
public void test_node_patch_to_remove_docker_ready_fields() throws Exception {
assertResponse(new Request("http:
Utf8.toBytes("{" +
"\"currentVespaVersion\": \"\"," +
"\"currentDockerImage\": \"\"" +
"}"
),
Request.Method.PATCH),
"{\"message\":\"Updated host5.yahoo.com\"}");
assertFile(new Request("http:
}
@Test
public void test_reports_patching() throws IOException {
assertResponse(new Request("http:
Utf8.toBytes("{" +
" \"reports\": {" +
" \"actualCpuCores\": {" +
" \"createdMillis\": 1, " +
" \"description\": \"Actual number of CPU cores (2) differs from spec (4)\"," +
" \"type\": \"HARD_FAIL\"," +
" \"value\":2" +
" }," +
" \"diskSpace\": {" +
" \"createdMillis\": 2, " +
" \"description\": \"Actual disk space (2TB) differs from spec (3TB)\"," +
" \"type\": \"HARD_FAIL\"," +
" \"details\": {" +
" \"inGib\": 3," +
" \"disks\": [\"/dev/sda1\", \"/dev/sdb3\"]" +
" }" +
" }" +
" }" +
"}"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"reports\": {}}"),
Request.Method.PATCH),
200,
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
tester.assertResponse(new Request("http:
Utf8.toBytes("{" +
" \"reports\": {" +
" \"actualCpuCores\": {" +
" \"createdMillis\": 3 " +
" }" +
" }" +
"}"),
Request.Method.PATCH),
200,
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"reports\": { \"diskSpace\": null } }"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"reports\": null }"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
}
@Test
public void test_upgrade() throws IOException {
assertResponse(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.123.456 for nodes of type config\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.123.456 for nodes of type confighost\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.123.456 for nodes of type controller\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.456\",\"controller\":\"6.123.456\"},\"osVersions\":{},\"dockerImages\":{}}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Target version for type tenant is not allowed\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"At least one of 'version', 'osVersion' or 'dockerImage' must be set\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.1\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot downgrade version without setting 'force'. " +
"Current target version: 6.123.456, attempted to set target version: 6.123.1\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.1\",\"force\":true}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.123.1 for nodes of type confighost\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.1\",\"controller\":\"6.123.456\"},\"osVersions\":{},\"dockerImages\":{}}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"version\": null}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot downgrade version without setting 'force'. Current target version: 6.123.1, attempted to set target version: 0.0.0\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": null, \"force\": true}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 0.0.0 for nodes of type confighost\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"controller\":\"6.123.456\"},\"osVersions\":{},\"dockerImages\":{}}");
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.5.2, upgradeBudget to PT0S for nodes of type confighost\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.5.2, upgradeBudget to PT0S for nodes of type host\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"controller\":\"6.123.456\"},\"osVersions\":{\"host\":\"7.5.2\",\"confighost\":\"7.5.2\"},\"dockerImages\":{}}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.124.42\", \"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.124.42, osVersion to 7.5.2, upgradeBudget to PT0S for nodes of type confighost\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Node type 'config' does not support OS upgrades\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.4.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot set target OS version to 7.4.2 without setting 'force', as it's lower than the current version: 7.5.2\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.4.2\", \"force\": true, \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.4.2, upgradeBudget to PT0S for nodes of type confighost\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": null}"),
Request.Method.PATCH),
200,
"{\"message\":\"Set osVersion to null for nodes of type confighost\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"dockerImage\": \"my-repo.my-domain.example:1234/repo/tenant\"}"),
Request.Method.PATCH),
"{\"message\":\"Set container image to my-repo.my-domain.example:1234/repo/tenant for nodes of type tenant\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"dockerImage\": \"my-repo.my-domain.example:1234/repo/image\"}"),
Request.Method.PATCH),
"{\"message\":\"Set container image to my-repo.my-domain.example:1234/repo/image for nodes of type config\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.124.42\",\"controller\":\"6.123.456\"},\"osVersions\":{\"host\":\"7.5.2\"},\"dockerImages\":{\"tenant\":\"my-repo.my-domain.example:1234/repo/tenant\",\"config\":\"my-repo.my-domain.example:1234/repo/image\"}}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"dockerImage\": \"my-repo.my-domain.example:1234/repo/image\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Setting container image for confighost nodes is unsupported\"}");
}
@Test
public void test_os_version() throws Exception {
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.5.2, upgradeBudget to PT0S for nodes of type host\"}");
var nodeRepository = (NodeRepository)tester.container().components().getComponent(MockNodeRepository.class.getName());
var osUpgradeActivator = new OsUpgradeActivator(nodeRepository, Duration.ofDays(1), new TestMetric());
osUpgradeActivator.run();
Response r = tester.container().handleRequest(new Request("http:
assertFalse("Response omits wantedOsVersions field", r.getBodyAsString().contains("wantedOsVersion"));
assertResponse(new Request("http:
Utf8.toBytes("{\"currentOsVersion\": \"7.5.2\"}"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"currentOsVersion\": \"7.5.2\"}"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost2.yahoo.com\"}");
assertResponse(new Request("http:
"{\"nodes\":[" +
"{\"url\":\"http:
"{\"url\":\"http:
"]}");
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.42.1\", \"upgradeBudget\": \"PT24H\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.42.1, upgradeBudget to PT24H for nodes of type host\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.42.1\", \"upgradeBudget\": \"foo\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid duration 'foo': Text cannot be parsed to a Duration\"}");
}
@Test
public void test_firmware_upgrades() throws IOException {
assertResponse(new Request("http:
Utf8.toBytes("{\"currentFirmwareCheck\":100}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
"{\"message\":\"Will request firmware checks on all hosts.\"}");
assertFile(new Request("http:
"dockerhost1-with-firmware-data.json");
assertFile(new Request("http:
"node1.json");
assertResponse(new Request("http:
"{\"message\":\"Cancelled outstanding requests for firmware checks\"}");
}
@Test
public void test_capacity() throws Exception {
assertFile(new Request("http:
assertFile(new Request("http:
List<String> hostsToRemove = List.of(
"dockerhost1.yahoo.com",
"dockerhost2.yahoo.com",
"dockerhost3.yahoo.com",
"dockerhost4.yahoo.com"
);
String requestUriTemplate = "http:
assertFile(new Request(String.format(requestUriTemplate,
String.join(",", hostsToRemove.subList(0, 3)))),
"capacity-hostremoval-possible.json");
assertFile(new Request(String.format(requestUriTemplate,
String.join(",", hostsToRemove))),
"capacity-hostremoval-impossible.json");
}
/** Tests the rendering of each node separately to make it easier to find errors */
@Test
public void test_single_node_rendering() throws Exception {
for (int i = 1; i <= 14; i++) {
if (i == 8 || i == 9 || i == 11 || i == 12) continue;
assertFile(new Request("http:
}
}
// Verifies that only disk GB may be overridden for a configured flavor — both when
// provisioning a host and when patching an existing one — and that the override is rendered.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void test_flavor_overrides() throws Exception {
String host = "parent2.yahoo.com";
tester.assertResponse(new Request("http:
("[{\"hostname\":\"" + host + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," +
"\"flavor\":\"large-variant\",\"resources\":{\"diskGb\":1234,\"memoryGb\":4321}}]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can only override disk GB for configured flavor\"}");
assertResponse(new Request("http:
("[{\"hostname\":\"" + host + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," +
"\"flavor\":\"large-variant\",\"type\":\"host\",\"resources\":{\"diskGb\":1234}}]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponseContains(new Request("http:
"\"resources\":{\"vcpu\":64.0,\"memoryGb\":128.0,\"diskGb\":1234.0,\"bandwidthGbps\":15.0,\"diskSpeed\":\"fast\",\"storageType\":\"remote\"}");
String tenant = "node-1-3.yahoo.com";
String resources = "\"resources\":{\"vcpu\":64.0,\"memoryGb\":128.0,\"diskGb\":1234.0,\"bandwidthGbps\":15.0,\"diskSpeed\":\"slow\",\"storageType\":\"remote\"}";
assertResponse(new Request("http:
("[{\"hostname\":\"" + tenant + "\"," + createIpAddresses("::2") + "\"openStackId\":\"osid-124\"," +
"\"type\":\"tenant\"," + resources + "}]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponseContains(new Request("http:
tester.assertResponse(new Request("http:
"{\"minDiskAvailableGb\":5432,\"minMainMemoryAvailableGb\":2345}".getBytes(StandardCharsets.UTF_8),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'minMainMemoryAvailableGb': Can only override disk GB for configured flavor\"}");
assertResponse(new Request("http:
"{\"minDiskAvailableGb\":5432}".getBytes(StandardCharsets.UTF_8),
Request.Method.PATCH),
"{\"message\":\"Updated " + host + "\"}");
tester.assertResponseContains(new Request("http:
"\"resources\":{\"vcpu\":64.0,\"memoryGb\":128.0,\"diskGb\":5432.0,\"bandwidthGbps\":15.0,\"diskSpeed\":\"fast\",\"storageType\":\"remote\"}");
}
// Verifies explicit node resources: POST fails when a required field ('memoryGb') is missing,
// succeeds with full resources, and PATCH of individual resource fields is rendered back.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void test_node_resources() throws Exception {
String hostname = "node123.yahoo.com";
String resources = "\"resources\":{\"vcpu\":5.0,\"memoryGb\":4321.0,\"diskGb\":1234.0,\"bandwidthGbps\":0.3,\"diskSpeed\":\"slow\",\"storageType\":\"local\"}";
tester.assertResponse(new Request("http:
("[{\"hostname\":\"" + hostname + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," +
resources.replace("\"memoryGb\":4321.0,", "") + "}]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Required field 'memoryGb' is missing\"}");
assertResponse(new Request("http:
("[{\"hostname\":\"" + hostname + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," + resources + "}]")
.getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponseContains(new Request("http:
assertResponse(new Request("http:
"{\"diskGb\":12,\"memoryGb\":34,\"vcpu\":56,\"fastDisk\":true,\"remoteStorage\":true,\"bandwidthGbps\":78.0}".getBytes(StandardCharsets.UTF_8),
Request.Method.PATCH),
"{\"message\":\"Updated " + hostname + "\"}");
tester.assertResponseContains(new Request("http:
"\"resources\":{\"vcpu\":56.0,\"memoryGb\":34.0,\"diskGb\":12.0,\"bandwidthGbps\":78.0,\"diskSpeed\":\"fast\",\"storageType\":\"remote\"}");
}
// Verifies that a node's switchHostname can be set at provisioning, changed via PATCH,
// and cleared again by patching it to null.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void test_node_switch_hostname() throws Exception {
String hostname = "host42.yahoo.com";
String json = asNodeJson(hostname, NodeType.host, "default", Optional.empty(), Optional.empty(),
Optional.of("switch0"), List.of(), "127.0.42.1", "::42:1");
assertResponse(new Request("http:
("[" + json + "]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponseContains(new Request("http:
json = "{\"switchHostname\":\"switch1\"}";
assertResponse(new Request("http:
"{\"message\":\"Updated host42.yahoo.com\"}");
tester.assertResponseContains(new Request("http:
json = "{\"switchHostname\":null}";
assertResponse(new Request("http:
"{\"message\":\"Updated host42.yahoo.com\"}");
tester.assertPartialResponse(new Request("http:
}
// Verifies PATCHing exclusiveToApplicationId and exclusiveToClusterType on a host,
// including clearing both fields again with null values.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void exclusive_to_patch() throws IOException {
String url = "http:
tester.assertPartialResponse(new Request(url), "exclusiveTo", false);
assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveToApplicationId\": \"t1:a1:i1\"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertPartialResponse(new Request(url), "exclusiveTo\":\"t1:a1:i1\",", true);
assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveToClusterType\": \"admin\"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertPartialResponse(new Request(url), "exclusiveTo\":\"t1:a1:i1\",\"exclusiveToClusterType\":\"admin\",", true);
assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveTo\": null, \"exclusiveToClusterType\": null}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertPartialResponse(new Request(url), "exclusiveTo", false);
}
// Verifies setting, rendering and clearing per-tenant archive URIs.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void archive_uris() throws IOException {
tester.assertPartialResponse(new Request("http:
tester.assertResponse(new Request("http:
assertResponse(new Request("http:
"{\"message\":\"Updated archive URI for tenant3\"}");
assertResponse(new Request("http:
"{\"message\":\"Updated archive URI for tenant2\"}");
tester.assertPartialResponse(new Request("http:
assertFile(new Request("http:
tester.assertResponse(new Request("http:
tester.assertPartialResponse(new Request("http:
}
@Test
// Convenience overload: builds docker-node JSON with the node type defaulted to tenant.
private static String asDockerNodeJson(String hostname, String parentHostname, String... ipAddress) {
return asDockerNodeJson(hostname, NodeType.tenant, parentHostname, ipAddress);
}
// Builds the JSON body for a docker (child) node under the given parent host.
// The "type" field is emitted only for non-tenant node types.
private static String asDockerNodeJson(String hostname, NodeType nodeType, String parentHostname, String... ipAddress) {
    StringBuilder json = new StringBuilder();
    json.append("{\"hostname\":\"").append(hostname)
        .append("\", \"parentHostname\":\"").append(parentHostname).append("\",");
    json.append(createIpAddresses(ipAddress));
    json.append("\"openStackId\":\"").append(hostname).append("\",\"flavor\":\"d-1-1-100\"");
    if (nodeType != NodeType.tenant) {
        json.append(",\"type\":\"").append(nodeType).append("\"");
    }
    return json.append("}").toString();
}
// Builds the JSON body for a bare node with the given flavor and IP addresses.
private static String asNodeJson(String hostname, String flavor, String... ipAddress) {
    String head = "{\"hostname\":\"" + hostname + "\", \"openStackId\":\"" + hostname + "\",";
    String tail = "\"flavor\":\"" + flavor + "\"}";
    return head + createIpAddresses(ipAddress) + tail;
}
// Builds JSON for a host-type node with no reservedTo/exclusiveTo/switch fields set.
private static String asHostJson(String hostname, String flavor, List<String> additionalHostnames, String... ipAddress) {
return asNodeJson(hostname, NodeType.host, flavor, Optional.empty(), Optional.empty(), Optional.empty(),
additionalHostnames, ipAddress);
}
// Builds the JSON body for a node of the given type. Optional fields (reservedTo,
// exclusiveTo, switchHostname) and additionalHostnames are emitted only when present/non-empty.
private static String asNodeJson(String hostname, NodeType nodeType, String flavor, Optional<TenantName> reservedTo,
                                 Optional<ApplicationId> exclusiveTo, Optional<String> switchHostname,
                                 List<String> additionalHostnames, String... ipAddress) {
    StringBuilder json = new StringBuilder("{\"hostname\":\"").append(hostname)
            .append("\", \"openStackId\":\"").append(hostname).append("\",")
            .append(createIpAddresses(ipAddress))
            .append("\"flavor\":\"").append(flavor).append("\"");
    reservedTo.ifPresent(tenantName -> json.append(", \"reservedTo\":\"").append(tenantName.value()).append("\""));
    exclusiveTo.ifPresent(appId -> json.append(", \"exclusiveTo\":\"").append(appId.serializedForm()).append("\""));
    switchHostname.ifPresent(s -> json.append(", \"switchHostname\":\"").append(s).append("\""));
    if (!additionalHostnames.isEmpty()) {
        json.append(", \"additionalHostnames\":[\"").append(String.join("\",\"", additionalHostnames)).append("\"]");
    }
    return json.append(", \"type\":\"").append(nodeType).append("\"}").toString();
}
// Renders the given addresses as the JSON fragment '"ipAddresses":[...],'
// (quoted, comma-separated, with a trailing comma after the closing bracket).
private static String createIpAddresses(String... ipAddress) {
    StringBuilder out = new StringBuilder("\"ipAddresses\":[");
    for (int i = 0; i < ipAddress.length; i++) {
        if (i > 0) out.append(',');
        out.append('"').append(ipAddress[i]).append('"');
    }
    return out.append("],").toString();
}
// Asserts a 200 response announcing a scheduled restart of exactly restartCount nodes.
private void assertRestart(int restartCount, Request request) throws IOException {
tester.assertResponse(request, 200, "{\"message\":\"Scheduled restart of " + restartCount + " matching nodes\"}");
}
// Asserts a 200 response announcing a scheduled reboot of exactly rebootCount nodes.
private void assertReboot(int rebootCount, Request request) throws IOException {
tester.assertResponse(request, 200, "{\"message\":\"Scheduled reboot of " + rebootCount + " matching nodes\"}");
}
// Delegates to the tester: compares the response body against the named expected-output file.
private void assertFile(Request request, String file) throws IOException {
tester.assertFile(request, file);
}
// Delegates to the tester's response assertion (no explicit status code).
private void assertResponse(Request request, String response) throws IOException {
tester.assertResponse(request, response);
}
} | class NodesV2ApiTest {
private RestApiTester tester;
// Creates a fresh RestApiTester before each test, so tests do not share state.
@Before
public void createTester() {
tester = new RestApiTester();
}
// Releases the tester's resources after each test.
@After
public void closeTester() {
tester.close();
}
/** This test gives examples of the node requests that can be made to nodes/v2 */
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void test_requests() throws Exception {
// GET rendering of various node lists and single nodes (compared against fixture files).
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
// Schedule restarts and reboots, verifying the generation counters afterwards.
assertRestart(1, new Request("http:
new byte[0], Request.Method.POST));
assertRestart(2, new Request("http:
new byte[0], Request.Method.POST));
assertRestart(13, new Request("http:
new byte[0], Request.Method.POST));
tester.assertResponseContains(new Request("http:
"\"restartGeneration\":3");
assertReboot(14, new Request("http:
new byte[0], Request.Method.POST));
assertReboot(2, new Request("http:
new byte[0], Request.Method.POST));
assertReboot(19, new Request("http:
new byte[0], Request.Method.POST));
tester.assertResponseContains(new Request("http:
"\"rebootGeneration\":3");
// Add new nodes and move them through the state machine (dirty/ready/failed/active).
assertResponse(new Request("http:
("[" + asNodeJson("host8.yahoo.com", "default", "127.0.8.1") + "," +
asHostJson("host9.yahoo.com", "large-variant", List.of("node9-1.yahoo.com"), "127.0.9.1", "::9:1") + "," +
asNodeJson("parent2.yahoo.com", NodeType.host, "large-variant", Optional.of(TenantName.from("myTenant")),
Optional.of(ApplicationId.from("tenant1", "app1", "instance1")), Optional.empty(), List.of(), "127.0.127.1", "::127:1") + "," +
asDockerNodeJson("host11.yahoo.com", "parent.host.yahoo.com", "::11") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 4 nodes to the provisioned state\"}");
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
assertFile(new Request("http:
tester.assertResponse(new Request("http:
("[" + asNodeJson("host8.yahoo.com", "default", "127.0.254.8") + "]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot add provisioned host host8.yahoo.com: A node with this name already exists\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
"{\"message\":\"Removed host9.yahoo.com\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host8.yahoo.com to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host8.yahoo.com to ready\"}");
tester.assertResponseContains(new Request("http:
"\"state\":\"ready\"");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host8.yahoo.com to ready\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to failed and marked none as wantToFail\"}");
tester.assertResponseContains(new Request("http:
"\"state\":\"failed\"");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to active\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
"{\"message\":\"Removed host8.yahoo.com\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved test-node-pool-102-2 to failed and marked none as wantToFail\"}");
tester.assertResponseContains(new Request("http:
"\"state\":\"failed\"");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved test-node-pool-102-2 to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved test-node-pool-102-2 to ready\"}");
tester.assertResponse(new Request("http:
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'test-node-pool-102-2'\"}");
assertResponse(new Request("http:
"{\"message\":\"Moved none to failed and marked dockerhost1.yahoo.com, host4.yahoo.com as wantToFail\"}");
assertResponse(new Request("http:
"{\"url\":\"http:
// PATCH individual node fields and verify the update messages / rendered values.
assertResponse(new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentRebootGeneration\": 1}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"flavor\": \"d-2-8-100\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentVespaVersion\": \"5.104.142\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"parentHostname\": \"parent.yahoo.com\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"ipAddresses\": [\"127.0.0.1\",\"::1\"]}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToRetire\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentVespaVersion\": \"6.43.0\",\"currentDockerImage\": \"docker-registry.domain.tld:8080/dist/vespa:6.45.0\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"openStackId\": \"patched-openstackid\"}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"modelName\": \"foo\"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToRetire\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToDeprovision\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToDeprovision\": false, \"wantToRetire\": false}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToDeprovision\": true, \"wantToRetire\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost2.yahoo.com\"}");
tester.assertResponseContains(new Request("http:
"\"wantToRetire\":true,\"preferToRetire\":false,\"wantToDeprovision\":false,");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToRebuild\": true, \"wantToRetire\": true}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertResponseContains(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"modelName\": null}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertPartialResponse(new Request("http:
// Suspend host4 via the orchestrator mock, then park, remove and finally forget dockerhost1
// (forgetting is rejected while wantToRebuild is still set).
tester.container().handleRequest((new Request("http:
((OrchestratorMock) tester.container().components().getComponent(OrchestratorMock.class.getName()))
.suspend(new HostName("host4.yahoo.com"));
assertFile(new Request("http:
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved dockerhost1.yahoo.com to parked\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
"{\"message\":\"Removed dockerhost1.yahoo.com\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"deprovisioned host dockerhost1.yahoo.com is rebuilding and cannot be forgotten\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"wantToRebuild\": false}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
"{\"message\":\"Permanently removed dockerhost1.yahoo.com\"}");
}
// Verifies the applications list and per-application rendering, plus PATCHing
// currentReadShare/maxReadShare on an application.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void test_application_requests() throws Exception {
assertFile(new Request("http:
assertFile(new Request("http:
"application1.json");
assertFile(new Request("http:
"application2.json");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentReadShare\": 0.3, " +
"\"maxReadShare\": 0.5 }"), Request.Method.PATCH),
"{\"message\":\"Updated application 'tenant1.application1.instance1'\"}");
}
// A POST carrying X-HTTP-Method-Override: PATCH must be processed as a PATCH.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void post_with_patch_method_override_in_header_is_handled_as_patch() throws Exception {
Request req = new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"), Request.Method.POST);
req.getHeaders().add("X-HTTP-Method-Override", "PATCH");
assertResponse(req, "{\"message\":\"Updated host4.yahoo.com\"}");
}
// A POST with an unsupported X-HTTP-Method-Override value must fail with a clear 400.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void post_with_invalid_method_override_in_header_gives_sane_error_message() throws Exception {
Request req = new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"), Request.Method.POST);
req.getHeaders().add("X-HTTP-Method-Override", "GET");
tester.assertResponse(req, 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Illegal X-HTTP-Method-Override header for POST request. Accepts 'PATCH' but got 'GET'\"}");
}
// Nodes can be provisioned with IPv4-only, IPv6-only, and dual-stack addresses.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void post_node_with_ip_address() throws Exception {
assertResponse(new Request("http:
("[" + asNodeJson("ipv4-host.yahoo.com", "default","127.0.0.1") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertResponse(new Request("http:
("[" + asNodeJson("ipv6-host.yahoo.com", "default", "::1") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertResponse(new Request("http:
("[" + asNodeJson("dual-stack-host.yahoo.com", "default", "127.0.254.254", "::254:254") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
}
// Invalid or already-assigned IP addresses must be rejected with a 400, both at
// provisioning time and when PATCHing — except that a config server may share
// its config host's address (parent/child), while unrelated nodes may not.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void post_node_with_duplicate_ip_address() throws Exception {
Request req = new Request("http:
("[" + asNodeJson("host-with-ip.yahoo.com", "default", "foo") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST);
tester.assertResponse(req, 400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid IP address 'foo': 'foo' is not an IP string literal.\"}");
tester.assertResponse(new Request("http:
"[" + asNodeJson("tenant-node-foo.yahoo.com", "default", "127.0.1.1") + "]",
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot assign [127.0.1.1] to tenant-node-foo.yahoo.com: [127.0.1.1] already assigned to host1.yahoo.com\"}");
tester.assertResponse(new Request("http:
"{\"ipAddresses\": [\"127.0.2.1\"]}",
Request.Method.PATCH), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'ipAddresses': Cannot assign [127.0.2.1] to test-node-pool-102-2: [127.0.2.1] already assigned to host2.yahoo.com\"}");
tester.assertResponse(new Request("http:
"[" + asHostJson("host200.yahoo.com", "default", List.of(), "127.0.2.1") + "]",
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot assign [127.0.2.1] to host200.yahoo.com: [127.0.2.1] already assigned to host2.yahoo.com\"}");
tester.assertResponse(new Request("http:
"{\"ipAddresses\": [\"::104:3\"]}",
Request.Method.PATCH), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'ipAddresses': Cannot assign [::100:4, ::100:3, ::100:2, ::104:3] to dockerhost1.yahoo.com: [::104:3] already assigned to dockerhost5.yahoo.com\"}");
tester.assertResponse(new Request("http:
"[" + asNodeJson("cfghost42.yahoo.com", NodeType.confighost, "default", Optional.empty(), Optional.empty(), Optional.empty(), List.of(), "127.0.42.1") + "]",
Request.Method.POST), 200,
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponse(new Request("http:
"[" + asDockerNodeJson("cfg42.yahoo.com", NodeType.config, "cfghost42.yahoo.com", "127.0.42.1") + "]",
Request.Method.POST), 200,
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponse(new Request("http:
"[" + asDockerNodeJson("proxy42.yahoo.com", NodeType.proxy, "cfghost42.yahoo.com", "127.0.42.1") + "]",
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot assign [127.0.42.1] to proxy42.yahoo.com: [127.0.42.1] already assigned to cfg42.yahoo.com\"}");
tester.assertResponse(new Request("http:
"[" + asNodeJson("cfghost43.yahoo.com", NodeType.confighost, "default", Optional.empty(), Optional.empty(), Optional.empty(), List.of(), "127.0.43.1") + "]",
Request.Method.POST), 200,
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponse(new Request("http:
"{\"ipAddresses\": [\"127.0.43.1\"]}",
Request.Method.PATCH), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'ipAddresses': Cannot assign [127.0.43.1] to cfg42.yahoo.com: [127.0.43.1] already assigned to cfghost43.yahoo.com\"}");
}
// additionalHostnames can be set and cleared via PATCH; rendering compared to fixture files.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void patch_hostnames() throws IOException {
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"additionalHostnames\": [\"a\",\"b\"]}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"additionalHostnames\": []}"), Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
assertFile(new Request("http:
}
// A controller-type node can be provisioned and is rendered as expected.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void post_controller_node() throws Exception {
String data = "[{\"hostname\":\"controller1.yahoo.com\", \"openStackId\":\"fake-controller1.yahoo.com\"," +
createIpAddresses("127.0.0.1") +
"\"flavor\":\"default\"" +
", \"type\":\"controller\"}]";
assertResponse(new Request("http:
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertFile(new Request("http:
}
// A host carrying a HARD_FAIL report (here: diskSpace) cannot be moved to ready.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void fails_to_ready_node_with_hard_fail() throws Exception {
assertResponse(new Request("http:
("[" + asHostJson("host12.yahoo.com", "default", List.of()) + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
String msg = "Actual disk space (2TB) differs from spec (3TB)";
assertResponse(new Request("http:
Utf8.toBytes("{\"reports\":{\"diskSpace\":{\"createdMillis\":2,\"description\":\"" + msg + "\",\"type\": \"HARD_FAIL\"}}}"),
Request.Method.PATCH),
"{\"message\":\"Updated host12.yahoo.com\"}");
tester.assertResponse(new Request("http:
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"provisioned host host12.yahoo.com cannot be readied because it has " +
"hard failures: [diskSpace reported 1970-01-01T00:00:00.002Z: " + msg + "]\"}");
}
// PATCHing currentRebootGeneration on a dirty node must leave rebootGeneration at 0.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void patching_dirty_node_does_not_increase_reboot_generation() throws Exception {
assertResponse(new Request("http:
("[" + asNodeJson("foo.yahoo.com", "default") + "]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to failed and marked none as wantToFail\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to dirty\"}");
tester.assertResponseContains(new Request("http:
"\"rebootGeneration\":0");
assertResponse(new Request("http:
Utf8.toBytes("{\"currentRebootGeneration\": 42}"), Request.Method.PATCH),
"{\"message\":\"Updated foo.yahoo.com\"}");
tester.assertResponseContains(new Request("http:
"\"rebootGeneration\":0");
}
// Provisions a tenant node, moves it to ready, and verifies its rendered ACLs.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void acl_request_by_tenant_node() throws Exception {
String hostname = "foo.yahoo.com";
assertResponse(new Request("http:
("[" + asNodeJson(hostname, "default", "127.0.222.1") + "]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to ready\"}");
assertFile(new Request("http:
}
// Verifies the rendered ACLs for a config server against a fixture file.
// NOTE(review): URL string literal in this extract is truncated after "http:" — restore from VCS.
@Test
public void acl_request_by_config_server() throws Exception {
assertFile(new Request("http:
}
// Covers error handling: unknown hostnames (404), illegal state transitions, removal of
// allocated nodes, type errors in PATCH bodies, duplicate hostnames in a POST, and fields
// that are not settable on child nodes — each with its exact error message.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void test_invalid_requests() throws Exception {
tester.assertResponse(new Request("http:
new byte[0], Request.Method.GET),
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'node-does-not-exist'\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'node-does-not-exist'\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host1.yahoo.com to failed and marked none as wantToFail\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot make failed host host1.yahoo.com allocated to tenant1.application1.instance1 as 'container/id1/0/0' available for new allocation as it is not in state [dirty]\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host1.yahoo.com to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host1.yahoo.com to ready\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to parked\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot make parked host host2.yahoo.com allocated to tenant2.application2.instance2 as 'content/id2/0/0/stateful' available for new allocation as it is not in state [dirty]\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to dirty\"}");
assertResponse(new Request("http:
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved host2.yahoo.com to ready\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'host2.yahoo.com'\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.DELETE),
400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"active child node host4.yahoo.com allocated to tenant3.application3.instance3 as 'content/id3/0/0/stateful' is currently allocated and cannot be removed while in active\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": \"1\"}"), Request.Method.PATCH),
400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'currentRestartGeneration': Expected a LONG value, got a STRING\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"flavor\": 1}"), Request.Method.PATCH),
400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'flavor': Expected a STRING value, got a LONG\"}");
tester.assertResponse(new Request("http:
new byte[0], Request.Method.PUT), 404,
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'host2.yahoo.com'\"}");
tester.assertResponse(new Request("http:
("[" + asNodeJson("host8.yahoo.com", "default", "127.0.254.1", "::254:1") + "," +
asNodeJson("host8.yahoo.com", "large-variant", "127.0.253.1", "::253:1") + "]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST), 400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot add nodes: provisioned host host8.yahoo.com is duplicated in the argument list\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"modelName\": \"foo\"}"), Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'modelName': A child node cannot have model name set\"}");
}
// Verifies multi-field PATCH on an allocated node, 404 for unknown hostnames, and rejection
// of allocation-dependent fields on an unallocated node.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void test_node_patching() throws Exception {
assertResponse(new Request("http:
Utf8.toBytes("{" +
"\"currentRestartGeneration\": 1," +
"\"currentRebootGeneration\": 3," +
"\"flavor\": \"medium-disk\"," +
"\"currentVespaVersion\": \"5.104.142\"," +
"\"failCount\": 0," +
"\"parentHostname\": \"parent.yahoo.com\"" +
"}"
),
Request.Method.PATCH),
"{\"message\":\"Updated host4.yahoo.com\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"),
Request.Method.PATCH),
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node found with hostname doesnotexist.yahoo.com\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"currentRestartGeneration\": 1}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'currentRestartGeneration': Node is not allocated\"}");
}
// PATCHing empty strings clears currentVespaVersion and currentDockerImage; rendering
// is compared against a fixture file.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void test_node_patch_to_remove_docker_ready_fields() throws Exception {
assertResponse(new Request("http:
Utf8.toBytes("{" +
"\"currentVespaVersion\": \"\"," +
"\"currentDockerImage\": \"\"" +
"}"
),
Request.Method.PATCH),
"{\"message\":\"Updated host5.yahoo.com\"}");
assertFile(new Request("http:
}
// Exercises the reports field: adding multiple reports, an empty-object PATCH (which keeps
// existing reports), updating a single report, deleting one report with null, and deleting
// all reports by setting the whole field to null. Each step is checked against a fixture file.
// NOTE(review): URL string literals in this extract are truncated after "http:" — restore from VCS.
@Test
public void test_reports_patching() throws IOException {
assertResponse(new Request("http:
Utf8.toBytes("{" +
" \"reports\": {" +
" \"actualCpuCores\": {" +
" \"createdMillis\": 1, " +
" \"description\": \"Actual number of CPU cores (2) differs from spec (4)\"," +
" \"type\": \"HARD_FAIL\"," +
" \"value\":2" +
" }," +
" \"diskSpace\": {" +
" \"createdMillis\": 2, " +
" \"description\": \"Actual disk space (2TB) differs from spec (3TB)\"," +
" \"type\": \"HARD_FAIL\"," +
" \"details\": {" +
" \"inGib\": 3," +
" \"disks\": [\"/dev/sda1\", \"/dev/sdb3\"]" +
" }" +
" }" +
" }" +
"}"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"reports\": {}}"),
Request.Method.PATCH),
200,
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
tester.assertResponse(new Request("http:
Utf8.toBytes("{" +
" \"reports\": {" +
" \"actualCpuCores\": {" +
" \"createdMillis\": 3 " +
" }" +
" }" +
"}"),
Request.Method.PATCH),
200,
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"reports\": { \"diskSpace\": null } }"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"reports\": null }"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
}
@Test
public void test_upgrade() throws IOException {
assertResponse(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.123.456 for nodes of type config\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.123.456 for nodes of type confighost\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.123.456 for nodes of type controller\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.456\",\"controller\":\"6.123.456\"},\"osVersions\":{},\"dockerImages\":{}}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Target version for type tenant is not allowed\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"At least one of 'version', 'osVersion' or 'dockerImage' must be set\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.1\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot downgrade version without setting 'force'. " +
"Current target version: 6.123.456, attempted to set target version: 6.123.1\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.123.1\",\"force\":true}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.123.1 for nodes of type confighost\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.1\",\"controller\":\"6.123.456\"},\"osVersions\":{},\"dockerImages\":{}}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"version\": null}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot downgrade version without setting 'force'. Current target version: 6.123.1, attempted to set target version: 0.0.0\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": null, \"force\": true}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 0.0.0 for nodes of type confighost\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"controller\":\"6.123.456\"},\"osVersions\":{},\"dockerImages\":{}}");
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.5.2, upgradeBudget to PT0S for nodes of type confighost\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.5.2, upgradeBudget to PT0S for nodes of type host\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"controller\":\"6.123.456\"},\"osVersions\":{\"host\":\"7.5.2\",\"confighost\":\"7.5.2\"},\"dockerImages\":{}}");
assertResponse(new Request("http:
Utf8.toBytes("{\"version\": \"6.124.42\", \"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.124.42, osVersion to 7.5.2, upgradeBudget to PT0S for nodes of type confighost\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Node type 'config' does not support OS upgrades\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.4.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot set target OS version to 7.4.2 without setting 'force', as it's lower than the current version: 7.5.2\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.4.2\", \"force\": true, \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.4.2, upgradeBudget to PT0S for nodes of type confighost\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": null}"),
Request.Method.PATCH),
200,
"{\"message\":\"Set osVersion to null for nodes of type confighost\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"dockerImage\": \"my-repo.my-domain.example:1234/repo/tenant\"}"),
Request.Method.PATCH),
"{\"message\":\"Set container image to my-repo.my-domain.example:1234/repo/tenant for nodes of type tenant\"}");
assertResponse(new Request("http:
Utf8.toBytes("{\"dockerImage\": \"my-repo.my-domain.example:1234/repo/image\"}"),
Request.Method.PATCH),
"{\"message\":\"Set container image to my-repo.my-domain.example:1234/repo/image for nodes of type config\"}");
assertResponse(new Request("http:
"{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.124.42\",\"controller\":\"6.123.456\"},\"osVersions\":{\"host\":\"7.5.2\"},\"dockerImages\":{\"tenant\":\"my-repo.my-domain.example:1234/repo/tenant\",\"config\":\"my-repo.my-domain.example:1234/repo/image\"}}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"dockerImage\": \"my-repo.my-domain.example:1234/repo/image\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Setting container image for confighost nodes is unsupported\"}");
}
@Test
public void test_os_version() throws Exception {
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.5.2\", \"upgradeBudget\": \"PT0S\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.5.2, upgradeBudget to PT0S for nodes of type host\"}");
var nodeRepository = (NodeRepository)tester.container().components().getComponent(MockNodeRepository.class.getName());
var osUpgradeActivator = new OsUpgradeActivator(nodeRepository, Duration.ofDays(1), new TestMetric());
osUpgradeActivator.run();
Response r = tester.container().handleRequest(new Request("http:
assertFalse("Response omits wantedOsVersions field", r.getBodyAsString().contains("wantedOsVersion"));
assertResponse(new Request("http:
Utf8.toBytes("{\"currentOsVersion\": \"7.5.2\"}"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertFile(new Request("http:
assertResponse(new Request("http:
Utf8.toBytes("{\"currentOsVersion\": \"7.5.2\"}"),
Request.Method.PATCH),
"{\"message\":\"Updated dockerhost2.yahoo.com\"}");
assertResponse(new Request("http:
"{\"nodes\":[" +
"{\"url\":\"http:
"{\"url\":\"http:
"]}");
assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.42.1\", \"upgradeBudget\": \"PT24H\"}"),
Request.Method.PATCH),
"{\"message\":\"Set osVersion to 7.42.1, upgradeBudget to PT24H for nodes of type host\"}");
tester.assertResponse(new Request("http:
Utf8.toBytes("{\"osVersion\": \"7.42.1\", \"upgradeBudget\": \"foo\"}"),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid duration 'foo': Text cannot be parsed to a Duration\"}");
}
@Test
public void test_firmware_upgrades() throws IOException {
assertResponse(new Request("http:
Utf8.toBytes("{\"currentFirmwareCheck\":100}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http:
"{\"message\":\"Will request firmware checks on all hosts.\"}");
assertFile(new Request("http:
"dockerhost1-with-firmware-data.json");
assertFile(new Request("http:
"node1.json");
assertResponse(new Request("http:
"{\"message\":\"Cancelled outstanding requests for firmware checks\"}");
}
@Test
public void test_capacity() throws Exception {
assertFile(new Request("http:
assertFile(new Request("http:
List<String> hostsToRemove = List.of(
"dockerhost1.yahoo.com",
"dockerhost2.yahoo.com",
"dockerhost3.yahoo.com",
"dockerhost4.yahoo.com"
);
String requestUriTemplate = "http:
assertFile(new Request(String.format(requestUriTemplate,
String.join(",", hostsToRemove.subList(0, 3)))),
"capacity-hostremoval-possible.json");
assertFile(new Request(String.format(requestUriTemplate,
String.join(",", hostsToRemove))),
"capacity-hostremoval-impossible.json");
}
/** Tests the rendering of each node separately to make it easier to find errors */
@Test
public void test_single_node_rendering() throws Exception {
for (int i = 1; i <= 14; i++) {
if (i == 8 || i == 9 || i == 11 || i == 12) continue;
assertFile(new Request("http:
}
}
@Test
public void test_flavor_overrides() throws Exception {
String host = "parent2.yahoo.com";
tester.assertResponse(new Request("http:
("[{\"hostname\":\"" + host + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," +
"\"flavor\":\"large-variant\",\"resources\":{\"diskGb\":1234,\"memoryGb\":4321}}]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can only override disk GB for configured flavor\"}");
assertResponse(new Request("http:
("[{\"hostname\":\"" + host + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," +
"\"flavor\":\"large-variant\",\"type\":\"host\",\"resources\":{\"diskGb\":1234}}]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponseContains(new Request("http:
"\"resources\":{\"vcpu\":64.0,\"memoryGb\":128.0,\"diskGb\":1234.0,\"bandwidthGbps\":15.0,\"diskSpeed\":\"fast\",\"storageType\":\"remote\"}");
String tenant = "node-1-3.yahoo.com";
String resources = "\"resources\":{\"vcpu\":64.0,\"memoryGb\":128.0,\"diskGb\":1234.0,\"bandwidthGbps\":15.0,\"diskSpeed\":\"slow\",\"storageType\":\"remote\"}";
assertResponse(new Request("http:
("[{\"hostname\":\"" + tenant + "\"," + createIpAddresses("::2") + "\"openStackId\":\"osid-124\"," +
"\"type\":\"tenant\"," + resources + "}]").
getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponseContains(new Request("http:
tester.assertResponse(new Request("http:
"{\"minDiskAvailableGb\":5432,\"minMainMemoryAvailableGb\":2345}".getBytes(StandardCharsets.UTF_8),
Request.Method.PATCH),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not set field 'minMainMemoryAvailableGb': Can only override disk GB for configured flavor\"}");
assertResponse(new Request("http:
"{\"minDiskAvailableGb\":5432}".getBytes(StandardCharsets.UTF_8),
Request.Method.PATCH),
"{\"message\":\"Updated " + host + "\"}");
tester.assertResponseContains(new Request("http:
"\"resources\":{\"vcpu\":64.0,\"memoryGb\":128.0,\"diskGb\":5432.0,\"bandwidthGbps\":15.0,\"diskSpeed\":\"fast\",\"storageType\":\"remote\"}");
}
@Test
public void test_node_resources() throws Exception {
String hostname = "node123.yahoo.com";
String resources = "\"resources\":{\"vcpu\":5.0,\"memoryGb\":4321.0,\"diskGb\":1234.0,\"bandwidthGbps\":0.3,\"diskSpeed\":\"slow\",\"storageType\":\"local\"}";
tester.assertResponse(new Request("http:
("[{\"hostname\":\"" + hostname + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," +
resources.replace("\"memoryGb\":4321.0,", "") + "}]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Required field 'memoryGb' is missing\"}");
assertResponse(new Request("http:
("[{\"hostname\":\"" + hostname + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," + resources + "}]")
.getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponseContains(new Request("http:
assertResponse(new Request("http:
"{\"diskGb\":12,\"memoryGb\":34,\"vcpu\":56,\"fastDisk\":true,\"remoteStorage\":true,\"bandwidthGbps\":78.0}".getBytes(StandardCharsets.UTF_8),
Request.Method.PATCH),
"{\"message\":\"Updated " + hostname + "\"}");
tester.assertResponseContains(new Request("http:
"\"resources\":{\"vcpu\":56.0,\"memoryGb\":34.0,\"diskGb\":12.0,\"bandwidthGbps\":78.0,\"diskSpeed\":\"fast\",\"storageType\":\"remote\"}");
}
@Test
public void test_node_switch_hostname() throws Exception {
String hostname = "host42.yahoo.com";
String json = asNodeJson(hostname, NodeType.host, "default", Optional.empty(), Optional.empty(),
Optional.of("switch0"), List.of(), "127.0.42.1", "::42:1");
assertResponse(new Request("http:
("[" + json + "]").getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
tester.assertResponseContains(new Request("http:
json = "{\"switchHostname\":\"switch1\"}";
assertResponse(new Request("http:
"{\"message\":\"Updated host42.yahoo.com\"}");
tester.assertResponseContains(new Request("http:
json = "{\"switchHostname\":null}";
assertResponse(new Request("http:
"{\"message\":\"Updated host42.yahoo.com\"}");
tester.assertPartialResponse(new Request("http:
}
@Test
public void exclusive_to_patch() throws IOException {
String url = "http:
tester.assertPartialResponse(new Request(url), "exclusiveTo", false);
assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveToApplicationId\": \"t1:a1:i1\"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertPartialResponse(new Request(url), "exclusiveTo\":\"t1:a1:i1\",", true);
assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveToClusterType\": \"admin\"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertPartialResponse(new Request(url), "exclusiveTo\":\"t1:a1:i1\",\"exclusiveToClusterType\":\"admin\",", true);
assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveTo\": null, \"exclusiveToClusterType\": null}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
tester.assertPartialResponse(new Request(url), "exclusiveTo", false);
}
@Test
public void archive_uris() throws IOException {
tester.assertPartialResponse(new Request("http:
tester.assertResponse(new Request("http:
assertResponse(new Request("http:
"{\"message\":\"Updated archive URI for tenant3\"}");
assertResponse(new Request("http:
"{\"message\":\"Updated archive URI for tenant2\"}");
tester.assertPartialResponse(new Request("http:
assertFile(new Request("http:
tester.assertResponse(new Request("http:
tester.assertPartialResponse(new Request("http:
}
@Test
private static String asDockerNodeJson(String hostname, String parentHostname, String... ipAddress) {
return asDockerNodeJson(hostname, NodeType.tenant, parentHostname, ipAddress);
}
private static String asDockerNodeJson(String hostname, NodeType nodeType, String parentHostname, String... ipAddress) {
return "{\"hostname\":\"" + hostname + "\", \"parentHostname\":\"" + parentHostname + "\"," +
createIpAddresses(ipAddress) +
"\"openStackId\":\"" + hostname + "\",\"flavor\":\"d-1-1-100\"" +
(nodeType != NodeType.tenant ? ",\"type\":\"" + nodeType + "\"" : "") +
"}";
}
private static String asNodeJson(String hostname, String flavor, String... ipAddress) {
return "{\"hostname\":\"" + hostname + "\", \"openStackId\":\"" + hostname + "\"," +
createIpAddresses(ipAddress) +
"\"flavor\":\"" + flavor + "\"}";
}
private static String asHostJson(String hostname, String flavor, List<String> additionalHostnames, String... ipAddress) {
return asNodeJson(hostname, NodeType.host, flavor, Optional.empty(), Optional.empty(), Optional.empty(),
additionalHostnames, ipAddress);
}
private static String asNodeJson(String hostname, NodeType nodeType, String flavor, Optional<TenantName> reservedTo,
Optional<ApplicationId> exclusiveTo, Optional<String> switchHostname,
List<String> additionalHostnames, String... ipAddress) {
return "{\"hostname\":\"" + hostname + "\", \"openStackId\":\"" + hostname + "\"," +
createIpAddresses(ipAddress) +
"\"flavor\":\"" + flavor + "\"" +
(reservedTo.map(tenantName -> ", \"reservedTo\":\"" + tenantName.value() + "\"").orElse("")) +
(exclusiveTo.map(appId -> ", \"exclusiveTo\":\"" + appId.serializedForm() + "\"").orElse("")) +
(switchHostname.map(s -> ", \"switchHostname\":\"" + s + "\"").orElse("")) +
(additionalHostnames.isEmpty() ? "" : ", \"additionalHostnames\":[\"" +
String.join("\",\"", additionalHostnames) + "\"]") +
", \"type\":\"" + nodeType + "\"}";
}
private static String createIpAddresses(String... ipAddress) {
return "\"ipAddresses\":[" +
Arrays.stream(ipAddress)
.map(ip -> "\"" + ip + "\"")
.collect(Collectors.joining(",")) +
"],";
}
private void assertRestart(int restartCount, Request request) throws IOException {
tester.assertResponse(request, 200, "{\"message\":\"Scheduled restart of " + restartCount + " matching nodes\"}");
}
private void assertReboot(int rebootCount, Request request) throws IOException {
tester.assertResponse(request, 200, "{\"message\":\"Scheduled reboot of " + rebootCount + " matching nodes\"}");
}
private void assertFile(Request request, String file) throws IOException {
tester.assertFile(request, file);
}
private void assertResponse(Request request, String response) throws IOException {
tester.assertResponse(request, response);
}
} |
s/:/=/ so it matches the existing ones. | public String toString() {
return Stream.of(hostId.map(id -> "hostId=" + id),
restartGeneration.map(gen -> "restartGeneration=" + gen),
rebootGeneration.map(gen -> "rebootGeneration=" + gen),
dockerImage.map(img -> "dockerImage=" + img.asString()),
vespaVersion.map(ver -> "vespaVersion=" + ver.toFullString()),
currentOsVersion.map(ver -> "currentOsVersion=" + ver.toFullString()),
currentFirmwareCheck.map(at -> "currentFirmwareCheck=" + at),
Optional.ofNullable(reports.isEmpty() ? null : "reports=" + reports),
Optional.of("trustStore:" + trustStore))
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.joining(", ", "{", "}"));
} | Optional.of("trustStore:" + trustStore)) | public String toString() {
return Stream.of(hostId.map(id -> "hostId=" + id),
restartGeneration.map(gen -> "restartGeneration=" + gen),
rebootGeneration.map(gen -> "rebootGeneration=" + gen),
dockerImage.map(img -> "dockerImage=" + img.asString()),
vespaVersion.map(ver -> "vespaVersion=" + ver.toFullString()),
currentOsVersion.map(ver -> "currentOsVersion=" + ver.toFullString()),
currentFirmwareCheck.map(at -> "currentFirmwareCheck=" + at),
Optional.ofNullable(reports.isEmpty() ? null : "reports=" + reports),
Optional.ofNullable(trustStore.isEmpty() ? null : "trustStore=" + trustStore))
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.joining(", ", "{", "}"));
} | class NodeAttributes {
private Optional<String> hostId = Optional.empty();
private Optional<Long> restartGeneration = Optional.empty();
private Optional<Long> rebootGeneration = Optional.empty();
private Optional<DockerImage> dockerImage = Optional.empty();
private Optional<Version> vespaVersion = Optional.empty();
private Optional<Version> currentOsVersion = Optional.empty();
private Optional<Instant> currentFirmwareCheck = Optional.empty();
private Set<TrustStoreItem> trustStore = Set.of();
/** The list of reports to patch. A null value is used to remove the report. */
private Map<String, JsonNode> reports = new TreeMap<>();
public NodeAttributes() { }
public NodeAttributes withHostId(String hostId) {
this.hostId = Optional.of(hostId);
return this;
}
public NodeAttributes withRestartGeneration(Optional<Long> restartGeneration) {
this.restartGeneration = restartGeneration;
return this;
}
public NodeAttributes withRestartGeneration(long restartGeneration) {
return withRestartGeneration(Optional.of(restartGeneration));
}
public NodeAttributes withRebootGeneration(long rebootGeneration) {
this.rebootGeneration = Optional.of(rebootGeneration);
return this;
}
public NodeAttributes withDockerImage(DockerImage dockerImage) {
this.dockerImage = Optional.of(dockerImage);
return this;
}
public NodeAttributes withVespaVersion(Version vespaVersion) {
this.vespaVersion = Optional.of(vespaVersion);
return this;
}
public NodeAttributes withCurrentOsVersion(Version currentOsVersion) {
this.currentOsVersion = Optional.of(currentOsVersion);
return this;
}
public NodeAttributes withCurrentFirmwareCheck(Instant currentFirmwareCheck) {
this.currentFirmwareCheck = Optional.of(currentFirmwareCheck);
return this;
}
public NodeAttributes withReports(Map<String, JsonNode> nodeReports) {
this.reports = new TreeMap<>(nodeReports);
return this;
}
public NodeAttributes withReport(String reportId, JsonNode jsonNode) {
reports.put(reportId, jsonNode);
return this;
}
public NodeAttributes withReportRemoved(String reportId) {
reports.put(reportId, null);
return this;
}
public Optional<String> getHostId() {
return hostId;
}
public Optional<Long> getRestartGeneration() {
return restartGeneration;
}
public Optional<Long> getRebootGeneration() {
return rebootGeneration;
}
public Optional<DockerImage> getDockerImage() {
return dockerImage;
}
public Optional<Version> getVespaVersion() {
return vespaVersion;
}
public Optional<Version> getCurrentOsVersion() {
return currentOsVersion;
}
public Optional<Instant> getCurrentFirmwareCheck() {
return currentFirmwareCheck;
}
public Map<String, JsonNode> getReports() {
return reports;
}
@Override
public int hashCode() {
return Objects.hash(hostId, restartGeneration, rebootGeneration, dockerImage, vespaVersion, currentOsVersion,
currentFirmwareCheck, reports);
}
public boolean isEmpty() {
return equals(new NodeAttributes());
}
@Override
public boolean equals(final Object o) {
if (!(o instanceof NodeAttributes)) {
return false;
}
final NodeAttributes other = (NodeAttributes) o;
return Objects.equals(hostId, other.hostId)
&& Objects.equals(restartGeneration, other.restartGeneration)
&& Objects.equals(rebootGeneration, other.rebootGeneration)
&& Objects.equals(dockerImage, other.dockerImage)
&& Objects.equals(vespaVersion, other.vespaVersion)
&& Objects.equals(currentOsVersion, other.currentOsVersion)
&& Objects.equals(currentFirmwareCheck, other.currentFirmwareCheck)
&& Objects.equals(reports, other.reports)
&& Objects.equals(trustStore, other.trustStore);
}
public NodeAttributes withTrustStore(Set<TrustStoreItem> trustStore) {
this.trustStore = Set.copyOf(trustStore);
return this;
}
public Set<TrustStoreItem> getTrustStore() {
return trustStore;
}
@Override
} | class NodeAttributes {
private Optional<String> hostId = Optional.empty();
private Optional<Long> restartGeneration = Optional.empty();
private Optional<Long> rebootGeneration = Optional.empty();
private Optional<DockerImage> dockerImage = Optional.empty();
private Optional<Version> vespaVersion = Optional.empty();
private Optional<Version> currentOsVersion = Optional.empty();
private Optional<Instant> currentFirmwareCheck = Optional.empty();
private List<TrustStoreItem> trustStore = List.of();
/** The list of reports to patch. A null value is used to remove the report. */
private Map<String, JsonNode> reports = new TreeMap<>();
public NodeAttributes() { }
public NodeAttributes withHostId(String hostId) {
this.hostId = Optional.of(hostId);
return this;
}
public NodeAttributes withRestartGeneration(Optional<Long> restartGeneration) {
this.restartGeneration = restartGeneration;
return this;
}
public NodeAttributes withRestartGeneration(long restartGeneration) {
return withRestartGeneration(Optional.of(restartGeneration));
}
public NodeAttributes withRebootGeneration(long rebootGeneration) {
this.rebootGeneration = Optional.of(rebootGeneration);
return this;
}
public NodeAttributes withDockerImage(DockerImage dockerImage) {
this.dockerImage = Optional.of(dockerImage);
return this;
}
public NodeAttributes withVespaVersion(Version vespaVersion) {
this.vespaVersion = Optional.of(vespaVersion);
return this;
}
public NodeAttributes withCurrentOsVersion(Version currentOsVersion) {
this.currentOsVersion = Optional.of(currentOsVersion);
return this;
}
public NodeAttributes withCurrentFirmwareCheck(Instant currentFirmwareCheck) {
this.currentFirmwareCheck = Optional.of(currentFirmwareCheck);
return this;
}
public NodeAttributes withReports(Map<String, JsonNode> nodeReports) {
this.reports = new TreeMap<>(nodeReports);
return this;
}
public NodeAttributes withReport(String reportId, JsonNode jsonNode) {
reports.put(reportId, jsonNode);
return this;
}
public NodeAttributes withReportRemoved(String reportId) {
reports.put(reportId, null);
return this;
}
public Optional<String> getHostId() {
return hostId;
}
public Optional<Long> getRestartGeneration() {
return restartGeneration;
}
public Optional<Long> getRebootGeneration() {
return rebootGeneration;
}
public Optional<DockerImage> getDockerImage() {
return dockerImage;
}
public Optional<Version> getVespaVersion() {
return vespaVersion;
}
public Optional<Version> getCurrentOsVersion() {
return currentOsVersion;
}
public Optional<Instant> getCurrentFirmwareCheck() {
return currentFirmwareCheck;
}
public Map<String, JsonNode> getReports() {
return reports;
}
@Override
public int hashCode() {
return Objects.hash(hostId, restartGeneration, rebootGeneration, dockerImage, vespaVersion, currentOsVersion,
currentFirmwareCheck, reports);
}
public boolean isEmpty() {
return equals(new NodeAttributes());
}
@Override
public boolean equals(final Object o) {
if (!(o instanceof NodeAttributes)) {
return false;
}
final NodeAttributes other = (NodeAttributes) o;
return Objects.equals(hostId, other.hostId)
&& Objects.equals(restartGeneration, other.restartGeneration)
&& Objects.equals(rebootGeneration, other.rebootGeneration)
&& Objects.equals(dockerImage, other.dockerImage)
&& Objects.equals(vespaVersion, other.vespaVersion)
&& Objects.equals(currentOsVersion, other.currentOsVersion)
&& Objects.equals(currentFirmwareCheck, other.currentFirmwareCheck)
&& Objects.equals(reports, other.reports)
&& Objects.equals(trustStore, other.trustStore);
}
public NodeAttributes withTrustStore(List<TrustStoreItem> trustStore) {
this.trustStore = List.copyOf(trustStore);
return this;
}
public List<TrustStoreItem> getTrustStore() {
return trustStore;
}
@Override
} |
I feel the naming "max index" is a bit ambiguous here since it's really tracking the (logical, not physical) node count instead of the highest index (due to +1), and with "max index" it's easy to assume it's the highest distribution key present since index and distribution key are used interchangeably as terms. Maybe `logicalNodeCount` could work? | void setNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
if (index >= maxIndex) {
maxIndex = index + 1;
}
setNodeStateInternal(index, ns);
} | maxIndex = index + 1; | void setNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
if (index >= logicalNodeCount) {
logicalNodeCount = index + 1;
}
setNodeStateInternal(index, ns);
} | class Nodes {
private int maxIndex;
private final NodeType type;
private final BitSet upNodes;
private final Map<Integer, NodeState> nodeStates = new HashMap<>();
Nodes(NodeType type) {
this.type = type;
upNodes = new BitSet();
}
Nodes(Nodes b) {
maxIndex = b.maxIndex;
type = b.type;
upNodes = (BitSet) b.upNodes.clone();
b.nodeStates.forEach((key, value) -> nodeStates.put(key, value.clone()));
}
void updateMaxIndex(int index) {
if (index > maxIndex) {
upNodes.set(maxIndex, index);
maxIndex = index;
}
}
int getMaxIndex() { return maxIndex; }
NodeState getNodeState(int index) {
NodeState ns = nodeStates.get(index);
if (ns != null) return ns;
return (index >= getMaxIndex() || ! upNodes.get(index))
? defaultDown()
: defaultUp();
}
private void validateInput(Node node, NodeState ns) {
ns.verifyValidInSystemState(node.getType());
if (node.getType() != type) {
throw new IllegalArgumentException("NodeType '" + node.getType() + "' differs from '" + type + "'");
}
}
void addNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
updateMaxIndex(index + 1);
setNodeStateInternal(index, ns);
}
private static boolean equalsWithDescription(NodeState a, NodeState b) {
return a.equals(b) && ((a.getState() != State.DOWN) || a.getDescription().equals(b.getDescription()));
}
private void setNodeStateInternal(int index, NodeState ns) {
nodeStates.remove(index);
if (ns.getState() == State.DOWN) {
upNodes.clear(index);
if ( ! equalsWithDescription(defaultDown(), ns)) {
nodeStates.put(index, ns);
}
} else {
upNodes.set(index);
if ( ! equalsWithDescription(defaultUp(), ns)) {
nodeStates.put(index, ns);
}
}
}
boolean similarToImpl(Nodes other, final NodeStateCmp nodeStateCmp) {
if (maxIndex != other.maxIndex) return false;
if (type != other.type) return false;
if ( ! upNodes.equals(other.upNodes)) return false;
for (Integer node : unionNodeSetWith(other.nodeStates.keySet())) {
final NodeState lhs = nodeStates.get(node);
final NodeState rhs = other.nodeStates.get(node);
if (!nodeStateCmp.similar(type, lhs, rhs)) {
return false;
}
}
return true;
}
private Set<Integer> unionNodeSetWith(final Set<Integer> otherNodes) {
final Set<Integer> unionNodeSet = new HashSet<>(nodeStates.keySet());
unionNodeSet.addAll(otherNodes);
return unionNodeSet;
}
@Override
public String toString() { return toString(false); }
String toString(boolean verbose) {
StringBuilder sb = new StringBuilder();
int nodeCount = verbose ? getMaxIndex() : upNodes.length();
if ( nodeCount > 0 ) {
sb.append(type == NodeType.DISTRIBUTOR ? " distributor:" : " storage:").append(nodeCount);
for (int i = 0; i < nodeCount; i++) {
String nodeState = getNodeState(i).serialize(i, verbose);
if (!nodeState.isEmpty()) {
sb.append(' ').append(nodeState);
}
}
}
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (! (obj instanceof Nodes)) return false;
Nodes b = (Nodes) obj;
if (maxIndex != b.maxIndex) return false;
if (type != b.type) return false;
if (!upNodes.equals(b.upNodes)) return false;
if (!nodeStates.equals(b.nodeStates)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(maxIndex, type, nodeStates, upNodes);
}
private NodeState defaultDown() {
return type == NodeType.STORAGE
? DEFAULT_STORAGE_DOWN_NODE_STATE
: DEFAULT_DISTRIBUTOR_DOWN_NODE_STATE;
}
private NodeState defaultUp() {
return defaultUpNodeState(type);
}
} | class Nodes {
private int logicalNodeCount;
private final NodeType type;
private final BitSet upNodes;
private final Map<Integer, NodeState> nodeStates = new HashMap<>();
Nodes(NodeType type) {
this.type = type;
upNodes = new BitSet();
}
Nodes(Nodes b) {
logicalNodeCount = b.logicalNodeCount;
type = b.type;
upNodes = (BitSet) b.upNodes.clone();
b.nodeStates.forEach((key, value) -> nodeStates.put(key, value.clone()));
}
void updateMaxIndex(int index) {
if (index > logicalNodeCount) {
upNodes.set(logicalNodeCount, index);
logicalNodeCount = index;
}
}
int getLogicalNodeCount() { return logicalNodeCount; }
NodeState getNodeState(int index) {
NodeState ns = nodeStates.get(index);
if (ns != null) return ns;
return (index >= getLogicalNodeCount() || ! upNodes.get(index))
? new NodeState(type, State.DOWN)
: new NodeState(type, State.UP);
}
private void validateInput(Node node, NodeState ns) {
ns.verifyValidInSystemState(node.getType());
if (node.getType() != type) {
throw new IllegalArgumentException("NodeType '" + node.getType() + "' differs from '" + type + "'");
}
}
void addNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
updateMaxIndex(index + 1);
setNodeStateInternal(index, ns);
}
private static boolean equalsWithDescription(NodeState a, NodeState b) {
return a.equals(b) && ((a.getState() != State.DOWN) || a.getDescription().equals(b.getDescription()));
}
private void setNodeStateInternal(int index, NodeState ns) {
nodeStates.remove(index);
if (ns.getState() == State.DOWN) {
upNodes.clear(index);
if ( ! equalsWithDescription(defaultDown(), ns)) {
nodeStates.put(index, ns);
}
} else {
upNodes.set(index);
if ( ! equalsWithDescription(defaultUp(), ns)) {
nodeStates.put(index, ns);
}
}
}
boolean similarToImpl(Nodes other, final NodeStateCmp nodeStateCmp) {
if (logicalNodeCount != other.logicalNodeCount) return false;
if (type != other.type) return false;
if ( ! upNodes.equals(other.upNodes)) return false;
for (Integer node : unionNodeSetWith(other.nodeStates.keySet())) {
final NodeState lhs = nodeStates.get(node);
final NodeState rhs = other.nodeStates.get(node);
if (!nodeStateCmp.similar(type, lhs, rhs)) {
return false;
}
}
return true;
}
/** Returns the union of this instance's explicitly stored node indices and {@code otherNodes}. */
private Set<Integer> unionNodeSetWith(final Set<Integer> otherNodes) {
    Set<Integer> union = new HashSet<>(otherNodes);
    union.addAll(nodeStates.keySet());
    return union;
}
@Override
public String toString() {
    // Delegate to the non-verbose rendering.
    return toString(false);
}
// Renders this node set in cluster-state syntax ("distributor:N"/"storage:N"
// followed by per-node entries). Verbose mode covers every logical node;
// otherwise rendering stops after the highest node whose up-bit is set
// (upNodes.length()). Empty per-node serializations (default UP) are omitted.
String toString(boolean verbose) {
StringBuilder sb = new StringBuilder();
int nodeCount = verbose ? getLogicalNodeCount() : upNodes.length();
if ( nodeCount > 0 ) {
sb.append(type == NodeType.DISTRIBUTOR ? " distributor:" : " storage:").append(nodeCount);
for (int i = 0; i < nodeCount; i++) {
String nodeState = getNodeState(i).serialize(i, verbose);
if (!nodeState.isEmpty()) {
sb.append(' ').append(nodeState);
}
}
}
return sb.toString();
}
@Override
public boolean equals(Object obj) {
    // Value equality over exactly the fields hashed by hashCode().
    if (!(obj instanceof Nodes)) return false;
    Nodes other = (Nodes) obj;
    return logicalNodeCount == other.logicalNodeCount
            && type == other.type
            && upNodes.equals(other.upNodes)
            && nodeStates.equals(other.nodeStates);
}
@Override
public int hashCode() {
// Hashes exactly the fields compared by equals(), keeping the contract.
return Objects.hash(logicalNodeCount, type, nodeStates, upNodes);
}
/** The implicit state represented by a cleared up-bit, per node type. */
private NodeState defaultDown() {
    if (type == NodeType.STORAGE) {
        return DEFAULT_STORAGE_DOWN_NODE_STATE;
    }
    return DEFAULT_DISTRIBUTOR_DOWN_NODE_STATE;
}
// The implicit state represented by a set up-bit.
private NodeState defaultUp() {
return defaultUpNodeState(type);
}
} |
I think this is a bit of a legacy issue where instance `equals()` checking has been done in contexts where what you really want is a semantic equality check instead of a value equality check (and descriptions do not matter for state semantics). So most likely intentional but not optimal...! I personally feel descriptions don't really belong _within_ `ClusterState` in the first place, since they're really just metadata related to `set-node-state` and friends that is of zero importance to the content nodes and/or ideal state algorithm. But changing this will likely require re-wiring quite a bit of the cluster controller code that uses `NodeState` et al internally. | public boolean equals(Object o) {
if (!(o instanceof NodeState)) { return false; }
NodeState ns = (NodeState) o;
if (state != ns.state
|| Math.abs(capacity - ns.capacity) > 0.0000000001
|| Math.abs(initProgress - ns.initProgress) > 0.0000000001
|| startTimestamp != ns.startTimestamp
|| minUsedBits != ns.minUsedBits)
{
return false;
}
return true;
} | public boolean equals(Object o) {
if (!(o instanceof NodeState)) { return false; }
NodeState ns = (NodeState) o;
if (state != ns.state
|| Math.abs(capacity - ns.capacity) > 0.0000000001
|| Math.abs(initProgress - ns.initProgress) > 0.0000000001
|| startTimestamp != ns.startTimestamp
|| minUsedBits != ns.minUsedBits)
{
return false;
}
return true;
} | class NodeState implements Cloneable {
public static final String ORCHESTRATOR_RESERVED_DESCRIPTION = "Orchestrator";
private final NodeType type;
private State state;
// Human-readable reason for the state; ignored by equals() and similarTo().
private String description = "";
private float capacity = 1.0f;
// Fraction of initialization completed; only rendered while INITIALIZING.
private float initProgress = 1.0f;
private int minUsedBits = 16;
private long startTimestamp = 0;
public static float getListingBucketsInitProgressLimit() { return 0.01f; }
// Creates a state for a node of the given type; all other fields keep their
// field-initializer defaults (capacity 1.0, init progress 1.0, 16 bits, ...).
public NodeState(NodeType type, State state) {
this.type = type;
this.state = state;
}
/**
 * Returns a field-by-field copy of this state. A shallow copy is sufficient:
 * all fields are primitives, enums, or immutable Strings.
 */
@Override
public NodeState clone() {
    try {
        return (NodeState) super.clone();
    } catch (CloneNotSupportedException e) {
        // Cannot happen: this class implements Cloneable. Preserve the cause
        // anyway so an impossible failure is still diagnosable.
        throw new RuntimeException("Does not happen", e);
    }
}
/**
 * A node cannot be forced into a state above its reported state.
 * For instance, a node that is down cannot be forced up, but a node that is
 * down can be forced into maintenance.
 *
 * @return true if this state's ordinal is strictly greater than the other's
 */
public boolean above(NodeState other) {
return (state.ordinal() > other.state.ordinal());
}
/**
 * Hashes state and capacity only; equals() compares a superset of these
 * fields, so equal objects still hash equally. Double.hashCode(double) yields
 * the same value as Double.valueOf(capacity).hashCode() without allocating a
 * boxed Double.
 */
public int hashCode() {
    return state.hashCode() ^ Double.hashCode(capacity);
}
/**
 * States are similar if the cluster state doesn't need to be updated due to a change.
 * Note that min dist bits may need to alter cluster state, but as we don't know at this point, we ignore it.
 * Cluster state will check for that.
 *
 * @param o candidate object; anything that is not a NodeState is dissimilar
 */
public boolean similarTo(Object o) {
if (!(o instanceof NodeState)) {
return false;
}
return similarToImpl((NodeState)o, true);
}
/** Like {@link #similarTo}, but differences in init progress are never considered. */
public boolean similarToIgnoringInitProgress(final NodeState other) {
    return similarToImpl(other, /* considerInitProgress */ false);
}
// Core similarity check. Description and minUsedBits are deliberately
// ignored. For storage nodes, init progress only matters when the two values
// fall on opposite sides of the listing-buckets threshold (the XOR detects
// exactly that crossing).
private boolean similarToImpl(final NodeState other, boolean considerInitProgress) {
if (state != other.state) return false;
if (Math.abs(capacity - other.capacity) > 0.0000000001) return false;
if (startTimestamp != other.startTimestamp) return false;
if (considerInitProgress
&& type.equals(NodeType.STORAGE)
&& (initProgress < getListingBucketsInitProgressLimit()
^ other.initProgress < getListingBucketsInitProgressLimit()))
{
return false;
}
return true;
}
// Builds a field-by-field diff against {@code other}. The state entry is
// rendered bold; init progress is only reported when both sides are
// INITIALIZING (it is meaningless otherwise). Float fields use an epsilon
// comparison to avoid noise.
public Diff getDiff(NodeState other) {
Diff diff = new Diff();
if (!state.equals(other.state)) {
diff.add(new Diff.Entry("", state, other.state).bold());
}
if (Math.abs(capacity - other.capacity) > 0.000000001) {
diff.add(new Diff.Entry("capacity", capacity, other.capacity));
}
if (minUsedBits != other.minUsedBits) {
diff.add(new Diff.Entry("minUsedBits", minUsedBits, other.minUsedBits));
}
if (Math.abs(initProgress - other.initProgress) > 0.000000001 && state.equals(State.INITIALIZING) && other.state.equals(State.INITIALIZING)) {
diff.add(new Diff.Entry("initProgress", initProgress, other.initProgress));
}
if (startTimestamp != other.startTimestamp) {
diff.add(new Diff.Entry("startTimestamp", startTimestamp, other.startTimestamp));
}
if (!description.equals(other.description)) {
diff.add(new Diff.Entry("description", description, other.description));
}
return diff;
}
/** Renders the field-by-field difference to {@code other} as text. */
public String getTextualDifference(NodeState other) {
    Diff diff = getDiff(other);
    return diff.toString();
}
/** Capacity is set by deserializing a node state. This seems odd, as it is config */
public NodeState setCapacity(float c) { this.capacity = c; return this; }
public NodeState setInitProgress(float p) { this.initProgress = p; return this; }
public NodeState setDescription(String desc) { this.description = desc; return this; }
public NodeState setMinUsedBits(int u) { this.minUsedBits = u; return this; }
public NodeState setState(State state) { this.state = state; return this; }
public NodeState setStartTimestamp(long ts) { this.startTimestamp = ts; return this; }
// Plain accessors; note the float fields are widened to double on the way out.
public double getCapacity() { return this.capacity; }
public double getInitProgress() { return this.initProgress; }
public boolean hasDescription() { return (description.length() > 0); }
public String getDescription() { return description; }
public State getState() { return this.state; }
public int getMinUsedBits() { return minUsedBits; }
public long getStartTimestamp() { return startTimestamp; }
// Default rendering delegates to the non-compact form.
public String toString() { return toString(false); }
// Human-readable rendering. Compact mode uses short field tags ("c", "i",
// "t", "b") and %.3g-formatted floats; fields holding their default value
// (capacity 1.0, timestamp 0, 16 bits, empty description) are omitted.
public String toString(boolean compact) {
StringBuilder sb = new StringBuilder();
if (compact) {
sb.append(state.serialize().toUpperCase());
} else {
sb.append(state);
}
if (Math.abs(capacity - 1.0) > 0.000000001) {
sb.append(compact ? ", c " : ", capacity ").append(compact ? String.format(Locale.ENGLISH, "%.3g", capacity) : capacity);
}
if (state.equals(State.INITIALIZING)) {
sb.append(compact ? ", i " : ", init progress ").append(compact ? String.format(Locale.ENGLISH, "%.3g", initProgress) : initProgress);
if (type.equals(NodeType.STORAGE)) {
// Storage nodes report which init phase the progress value implies.
if (initProgress < getListingBucketsInitProgressLimit()) {
sb.append(compact ? " (ls)" : " (listing files)");
} else {
sb.append(compact ? " (read)" : " (reading file headers)");
}
}
}
if (startTimestamp > 0) {
sb.append(compact ? ", t " : ", start timestamp ").append(startTimestamp);
}
if (minUsedBits != 16) {
sb.append(compact ? ", b " : ", minimum used bits ").append(minUsedBits);
}
if (description.length() > 0) {
sb.append(": ").append(description);
}
return sb.toString();
}
// Convenience overloads: serialize without a per-node index prefix.
public String serialize() { return serialize(-1, false); }
public String serialize(boolean verbose) { return serialize(-1, verbose); }
// Serializes to the wire format: space-separated "key:value" tokens, each
// prefixed with ".<nodeIdx>." when a node index is given. Default-valued
// fields are omitted entirely, so the result may be empty (default UP state).
// minUsedBits and (unless verbose) the description are only emitted in the
// standalone form (nodeIdx == -1).
public String serialize(int nodeIdx, boolean verbose) {
boolean empty = true;
StringBuilder sb = new StringBuilder();
String prefix = (nodeIdx == -1 ? "" : "." + nodeIdx + ".");
if (state != State.UP){
empty = false;
sb.append(prefix).append("s:").append(state.serialize());
}
if (Math.abs(capacity - 1.0) > 0.000000001) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("c:").append(capacity);
}
if (state == State.INITIALIZING) {
// INITIALIZING implies state != UP, so "s:" was already written and a
// separator is always needed here.
sb.append(' ');
sb.append(prefix).append("i:").append(initProgress);
}
if (startTimestamp != 0) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("t:").append(startTimestamp);
}
if (nodeIdx == -1 && minUsedBits != 16) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("b:").append(minUsedBits);
}
if ((verbose || nodeIdx == -1) && description.length() > 0) {
if (!empty) { sb.append(' '); }
// Spaces inside the description are escaped to keep tokenization intact.
sb.append(prefix).append("m:").append(StringUtilities.escape(description, ' '));
}
return sb.toString();
}
/**
 * Creates an instance from the serialized form produced by serialize.
 * Tokens are whitespace-separated "key:value" pairs; unrecognized or
 * multi-character keys fall through the switch and are silently ignored for
 * forward compatibility. The 'd' (disk) entries are legacy: they are parsed
 * and validated but not stored anywhere. Keys 'c' and 'd' are skipped for
 * non-storage node types (when a type is given).
 *
 * @param type node type the state is for, or null to accept any field
 * @param serialized wire-format string as produced by {@link #serialize}
 * @throws ParseException on malformed tokens or out-of-range values
 */
public static NodeState deserialize(NodeType type, String serialized) throws ParseException {
// Start from the implicit default (UP) and overwrite fields as tokens arrive.
NodeState newState = new NodeState(type, State.UP);
StringTokenizer st = new StringTokenizer(serialized, " \t\r\f\n", false);
while (st.hasMoreTokens()) {
String token = st.nextToken();
int index = token.indexOf(':');
if (index < 0) {
throw new ParseException("Token " + token + " does not contain ':': " + serialized, 0);
}
String key = token.substring(0, index);
String value = token.substring(index + 1);
if (key.length() > 0) switch (key.charAt(0)) {
case 's':
if (key.length() > 1) break;
newState.setState(State.get(value));
continue;
case 'b':
if (key.length() > 1) break;
newState.setMinUsedBits(Integer.parseInt(value));
continue;
case 'c':
if (key.length() > 1) break;
if (type != null && !type.equals(NodeType.STORAGE)) break;
try{
newState.setCapacity(Float.valueOf(value));
} catch (Exception e) {
throw new ParseException("Illegal capacity '" + value + "'. Capacity must be a positive floating point number", 0);
}
continue;
case 'i':
if (key.length() > 1) break;
try{
newState.setInitProgress(Float.valueOf(value));
} catch (Exception e) {
throw new ParseException("Illegal init progress '" + value + "'. Init progress must be a floating point number from 0.0 to 1.0", 0);
}
continue;
case 't':
if (key.length() > 1) break;
try{
newState.setStartTimestamp(Long.valueOf(value));
// Negative timestamps are rejected via the catch below.
if (newState.getStartTimestamp() < 0) throw new Exception();
} catch (Exception e) {
throw new ParseException("Illegal start timestamp " + value + ". Start timestamp must be 0 or a positive long.", 0);
}
continue;
case 'm':
if (key.length() > 1) break;
newState.setDescription(StringUtilities.unescape(value));
continue;
case 'd':
if (type != null && !type.equals(NodeType.STORAGE)) break;
int size = 0;
if (key.length() == 1) {
try{
size = Integer.valueOf(value);
} catch (Exception e) {
throw new ParseException("Invalid disk count '" + value + "'. Need a positive integer value", 0);
}
continue;
}
if (key.charAt(1) != '.') break;
int diskIndex;
int endp = key.indexOf('.', 2);
String indexStr = (endp < 0 ? key.substring(2) : key.substring(2, endp));
try{
diskIndex = Integer.valueOf(indexStr);
} catch (Exception e) {
throw new ParseException("Invalid disk index '" + indexStr + "'. need a positive integer value", 0);
}
if (diskIndex >= size) {
throw new ParseException("Cannot index disk " + diskIndex + " of " + size, 0);
}
continue;
default:
break;
}
}
return newState;
}
// Validates that this state may appear in a published system state for a node
// of the given type; distributors may not carry a non-default capacity.
//
// @throws IllegalArgumentException when the state is not legal for the type
public void verifyValidInSystemState(NodeType type) {
if (!state.validCurrentNodeState(type)) {
throw new IllegalArgumentException("State " + state + " cannot fit in system state for node of type: " + type);
}
if (type.equals(NodeType.DISTRIBUTOR) && Math.abs(capacity - 1.0) > 0.000000001) {
throw new IllegalArgumentException("Capacity should not be set for a distributor node");
}
}
} | class NodeState implements Cloneable {
public static final String ORCHESTRATOR_RESERVED_DESCRIPTION = "Orchestrator";
private final NodeType type;
private State state;
private String description = "";
private float capacity = 1.0f;
private float initProgress = 1.0f;
private int minUsedBits = 16;
private long startTimestamp = 0;
public static float getListingBucketsInitProgressLimit() { return 0.01f; }
public NodeState(NodeType type, State state) {
this.type = type;
this.state = state;
}
public NodeState clone() {
try{
return (NodeState) super.clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeException("Does not happen");
}
}
/**
* A state can not be forced to be in a state above it's reported state.
* For instance, a down being down, cannot be forced up, but a node being down can be forced in maintenance.
*/
public boolean above(NodeState other) {
return (state.ordinal() > other.state.ordinal());
}
public int hashCode() {
return state.hashCode() ^ Double.valueOf(capacity).hashCode();
}
/**
* States are similar if the cluster state doesn't need to be updated due to a change.
* Note that min dist bits may need to alter cluster state, but as we don't know at this point, we ignore it.
* Cluster state will check for that.
*/
public boolean similarTo(Object o) {
if (!(o instanceof NodeState)) {
return false;
}
return similarToImpl((NodeState)o, true);
}
public boolean similarToIgnoringInitProgress(final NodeState other) {
return similarToImpl(other, false);
}
private boolean similarToImpl(final NodeState other, boolean considerInitProgress) {
if (state != other.state) return false;
if (Math.abs(capacity - other.capacity) > 0.0000000001) return false;
if (startTimestamp != other.startTimestamp) return false;
if (considerInitProgress
&& type.equals(NodeType.STORAGE)
&& (initProgress < getListingBucketsInitProgressLimit()
^ other.initProgress < getListingBucketsInitProgressLimit()))
{
return false;
}
return true;
}
public Diff getDiff(NodeState other) {
Diff diff = new Diff();
if (!state.equals(other.state)) {
diff.add(new Diff.Entry("", state, other.state).bold());
}
if (Math.abs(capacity - other.capacity) > 0.000000001) {
diff.add(new Diff.Entry("capacity", capacity, other.capacity));
}
if (minUsedBits != other.minUsedBits) {
diff.add(new Diff.Entry("minUsedBits", minUsedBits, other.minUsedBits));
}
if (Math.abs(initProgress - other.initProgress) > 0.000000001 && state.equals(State.INITIALIZING) && other.state.equals(State.INITIALIZING)) {
diff.add(new Diff.Entry("initProgress", initProgress, other.initProgress));
}
if (startTimestamp != other.startTimestamp) {
diff.add(new Diff.Entry("startTimestamp", startTimestamp, other.startTimestamp));
}
if (!description.equals(other.description)) {
diff.add(new Diff.Entry("description", description, other.description));
}
return diff;
}
public String getTextualDifference(NodeState other) {
return getDiff(other).toString();
}
/** Capacity is set by deserializing a node state. This seems odd, as it is config */
public NodeState setCapacity(float c) { this.capacity = c; return this; }
public NodeState setInitProgress(float p) { this.initProgress = p; return this; }
public NodeState setDescription(String desc) { this.description = desc; return this; }
public NodeState setMinUsedBits(int u) { this.minUsedBits = u; return this; }
public NodeState setState(State state) { this.state = state; return this; }
public NodeState setStartTimestamp(long ts) { this.startTimestamp = ts; return this; }
public double getCapacity() { return this.capacity; }
public double getInitProgress() { return this.initProgress; }
public boolean hasDescription() { return (description.length() > 0); }
public String getDescription() { return description; }
public State getState() { return this.state; }
public int getMinUsedBits() { return minUsedBits; }
public long getStartTimestamp() { return startTimestamp; }
public String toString() { return toString(false); }
public String toString(boolean compact) {
StringBuilder sb = new StringBuilder();
if (compact) {
sb.append(state.serialize().toUpperCase());
} else {
sb.append(state);
}
if (Math.abs(capacity - 1.0) > 0.000000001) {
sb.append(compact ? ", c " : ", capacity ").append(compact ? String.format(Locale.ENGLISH, "%.3g", capacity) : capacity);
}
if (state.equals(State.INITIALIZING)) {
sb.append(compact ? ", i " : ", init progress ").append(compact ? String.format(Locale.ENGLISH, "%.3g", initProgress) : initProgress);
if (type.equals(NodeType.STORAGE)) {
if (initProgress < getListingBucketsInitProgressLimit()) {
sb.append(compact ? " (ls)" : " (listing files)");
} else {
sb.append(compact ? " (read)" : " (reading file headers)");
}
}
}
if (startTimestamp > 0) {
sb.append(compact ? ", t " : ", start timestamp ").append(startTimestamp);
}
if (minUsedBits != 16) {
sb.append(compact ? ", b " : ", minimum used bits ").append(minUsedBits);
}
if (description.length() > 0) {
sb.append(": ").append(description);
}
return sb.toString();
}
public String serialize() { return serialize(-1, false); }
public String serialize(boolean verbose) { return serialize(-1, verbose); }
public String serialize(int nodeIdx, boolean verbose) {
boolean empty = true;
StringBuilder sb = new StringBuilder();
String prefix = (nodeIdx == -1 ? "" : "." + nodeIdx + ".");
if (state != State.UP){
empty = false;
sb.append(prefix).append("s:").append(state.serialize());
}
if (Math.abs(capacity - 1.0) > 0.000000001) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("c:").append(capacity);
}
if (state == State.INITIALIZING) {
sb.append(' ');
sb.append(prefix).append("i:").append(initProgress);
}
if (startTimestamp != 0) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("t:").append(startTimestamp);
}
if (nodeIdx == -1 && minUsedBits != 16) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("b:").append(minUsedBits);
}
if ((verbose || nodeIdx == -1) && description.length() > 0) {
if (!empty) { sb.append(' '); }
sb.append(prefix).append("m:").append(StringUtilities.escape(description, ' '));
}
return sb.toString();
}
/** Creates an instance from the serialized form produced by serialize */
public static NodeState deserialize(NodeType type, String serialized) throws ParseException {
NodeState newState = new NodeState(type, State.UP);
StringTokenizer st = new StringTokenizer(serialized, " \t\r\f\n", false);
while (st.hasMoreTokens()) {
String token = st.nextToken();
int index = token.indexOf(':');
if (index < 0) {
throw new ParseException("Token " + token + " does not contain ':': " + serialized, 0);
}
String key = token.substring(0, index);
String value = token.substring(index + 1);
if (key.length() > 0) switch (key.charAt(0)) {
case 's':
if (key.length() > 1) break;
newState.setState(State.get(value));
continue;
case 'b':
if (key.length() > 1) break;
newState.setMinUsedBits(Integer.parseInt(value));
continue;
case 'c':
if (key.length() > 1) break;
if (type != null && !type.equals(NodeType.STORAGE)) break;
try{
newState.setCapacity(Float.valueOf(value));
} catch (Exception e) {
throw new ParseException("Illegal capacity '" + value + "'. Capacity must be a positive floating point number", 0);
}
continue;
case 'i':
if (key.length() > 1) break;
try{
newState.setInitProgress(Float.valueOf(value));
} catch (Exception e) {
throw new ParseException("Illegal init progress '" + value + "'. Init progress must be a floating point number from 0.0 to 1.0", 0);
}
continue;
case 't':
if (key.length() > 1) break;
try{
newState.setStartTimestamp(Long.valueOf(value));
if (newState.getStartTimestamp() < 0) throw new Exception();
} catch (Exception e) {
throw new ParseException("Illegal start timestamp " + value + ". Start timestamp must be 0 or a positive long.", 0);
}
continue;
case 'm':
if (key.length() > 1) break;
newState.setDescription(StringUtilities.unescape(value));
continue;
case 'd':
if (type != null && !type.equals(NodeType.STORAGE)) break;
int size = 0;
if (key.length() == 1) {
try{
size = Integer.valueOf(value);
} catch (Exception e) {
throw new ParseException("Invalid disk count '" + value + "'. Need a positive integer value", 0);
}
continue;
}
if (key.charAt(1) != '.') break;
int diskIndex;
int endp = key.indexOf('.', 2);
String indexStr = (endp < 0 ? key.substring(2) : key.substring(2, endp));
try{
diskIndex = Integer.valueOf(indexStr);
} catch (Exception e) {
throw new ParseException("Invalid disk index '" + indexStr + "'. need a positive integer value", 0);
}
if (diskIndex >= size) {
throw new ParseException("Cannot index disk " + diskIndex + " of " + size, 0);
}
continue;
default:
break;
}
}
return newState;
}
public void verifyValidInSystemState(NodeType type) {
if (!state.validCurrentNodeState(type)) {
throw new IllegalArgumentException("State " + state + " cannot fit in system state for node of type: " + type);
}
if (type.equals(NodeType.DISTRIBUTOR) && Math.abs(capacity - 1.0) > 0.000000001) {
throw new IllegalArgumentException("Capacity should not be set for a distributor node");
}
}
} | |
More fuel for the "node states should be immutable" fire... 🔥 | NodeState getNodeState(int index) {
NodeState ns = nodeStates.get(index);
if (ns != null) return ns;
return (index >= getMaxIndex() || ! upNodes.get(index))
? new NodeState(type, State.DOWN)
: new NodeState(type, State.UP);
} | : new NodeState(type, State.UP); | NodeState getNodeState(int index) {
NodeState ns = nodeStates.get(index);
if (ns != null) return ns;
return (index >= getLogicalNodeCount() || ! upNodes.get(index))
? new NodeState(type, State.DOWN)
: new NodeState(type, State.UP);
} | class Nodes {
private int maxIndex;
private final NodeType type;
private final BitSet upNodes;
private final Map<Integer, NodeState> nodeStates = new HashMap<>();
Nodes(NodeType type) {
this.type = type;
upNodes = new BitSet();
}
Nodes(Nodes b) {
maxIndex = b.maxIndex;
type = b.type;
upNodes = (BitSet) b.upNodes.clone();
b.nodeStates.forEach((key, value) -> nodeStates.put(key, value.clone()));
}
void updateMaxIndex(int index) {
if (index > maxIndex) {
upNodes.set(maxIndex, index);
maxIndex = index;
}
}
int getMaxIndex() { return maxIndex; }
private void validateInput(Node node, NodeState ns) {
ns.verifyValidInSystemState(node.getType());
if (node.getType() != type) {
throw new IllegalArgumentException("NodeType '" + node.getType() + "' differs from '" + type + "'");
}
}
// Overwrites the state of {@code node}, growing maxIndex directly. Unlike
// addNodeState/updateMaxIndex, indices skipped by this growth are NOT
// implicitly marked up (no upNodes bits are set here).
void setNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
if (index >= maxIndex) {
maxIndex = index + 1;
}
setNodeStateInternal(index, ns);
}
void addNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
updateMaxIndex(index + 1);
setNodeStateInternal(index, ns);
}
private static boolean equalsWithDescription(NodeState a, NodeState b) {
return a.equals(b) && ((a.getState() != State.DOWN) || a.getDescription().equals(b.getDescription()));
}
private void setNodeStateInternal(int index, NodeState ns) {
nodeStates.remove(index);
if (ns.getState() == State.DOWN) {
upNodes.clear(index);
if ( ! equalsWithDescription(defaultDown(), ns)) {
nodeStates.put(index, ns);
}
} else {
upNodes.set(index);
if ( ! equalsWithDescription(defaultUp(), ns)) {
nodeStates.put(index, ns);
}
}
}
boolean similarToImpl(Nodes other, final NodeStateCmp nodeStateCmp) {
if (maxIndex != other.maxIndex) return false;
if (type != other.type) return false;
if ( ! upNodes.equals(other.upNodes)) return false;
for (Integer node : unionNodeSetWith(other.nodeStates.keySet())) {
final NodeState lhs = nodeStates.get(node);
final NodeState rhs = other.nodeStates.get(node);
if (!nodeStateCmp.similar(type, lhs, rhs)) {
return false;
}
}
return true;
}
private Set<Integer> unionNodeSetWith(final Set<Integer> otherNodes) {
final Set<Integer> unionNodeSet = new HashSet<>(nodeStates.keySet());
unionNodeSet.addAll(otherNodes);
return unionNodeSet;
}
@Override
public String toString() { return toString(false); }
String toString(boolean verbose) {
StringBuilder sb = new StringBuilder();
int nodeCount = verbose ? getMaxIndex() : upNodes.length();
if ( nodeCount > 0 ) {
sb.append(type == NodeType.DISTRIBUTOR ? " distributor:" : " storage:").append(nodeCount);
for (int i = 0; i < nodeCount; i++) {
String nodeState = getNodeState(i).serialize(i, verbose);
if (!nodeState.isEmpty()) {
sb.append(' ').append(nodeState);
}
}
}
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (! (obj instanceof Nodes)) return false;
Nodes b = (Nodes) obj;
if (maxIndex != b.maxIndex) return false;
if (type != b.type) return false;
if (!upNodes.equals(b.upNodes)) return false;
if (!nodeStates.equals(b.nodeStates)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(maxIndex, type, nodeStates, upNodes);
}
private NodeState defaultDown() {
return type == NodeType.STORAGE
? DEFAULT_STORAGE_DOWN_NODE_STATE
: DEFAULT_DISTRIBUTOR_DOWN_NODE_STATE;
}
private NodeState defaultUp() {
return defaultUpNodeState(type);
}
} | class Nodes {
private int logicalNodeCount;
private final NodeType type;
private final BitSet upNodes;
private final Map<Integer, NodeState> nodeStates = new HashMap<>();
Nodes(NodeType type) {
this.type = type;
upNodes = new BitSet();
}
Nodes(Nodes b) {
logicalNodeCount = b.logicalNodeCount;
type = b.type;
upNodes = (BitSet) b.upNodes.clone();
b.nodeStates.forEach((key, value) -> nodeStates.put(key, value.clone()));
}
void updateMaxIndex(int index) {
if (index > logicalNodeCount) {
upNodes.set(logicalNodeCount, index);
logicalNodeCount = index;
}
}
int getLogicalNodeCount() { return logicalNodeCount; }
private void validateInput(Node node, NodeState ns) {
ns.verifyValidInSystemState(node.getType());
if (node.getType() != type) {
throw new IllegalArgumentException("NodeType '" + node.getType() + "' differs from '" + type + "'");
}
}
void setNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
if (index >= logicalNodeCount) {
logicalNodeCount = index + 1;
}
setNodeStateInternal(index, ns);
}
void addNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
updateMaxIndex(index + 1);
setNodeStateInternal(index, ns);
}
private static boolean equalsWithDescription(NodeState a, NodeState b) {
return a.equals(b) && ((a.getState() != State.DOWN) || a.getDescription().equals(b.getDescription()));
}
private void setNodeStateInternal(int index, NodeState ns) {
nodeStates.remove(index);
if (ns.getState() == State.DOWN) {
upNodes.clear(index);
if ( ! equalsWithDescription(defaultDown(), ns)) {
nodeStates.put(index, ns);
}
} else {
upNodes.set(index);
if ( ! equalsWithDescription(defaultUp(), ns)) {
nodeStates.put(index, ns);
}
}
}
boolean similarToImpl(Nodes other, final NodeStateCmp nodeStateCmp) {
if (logicalNodeCount != other.logicalNodeCount) return false;
if (type != other.type) return false;
if ( ! upNodes.equals(other.upNodes)) return false;
for (Integer node : unionNodeSetWith(other.nodeStates.keySet())) {
final NodeState lhs = nodeStates.get(node);
final NodeState rhs = other.nodeStates.get(node);
if (!nodeStateCmp.similar(type, lhs, rhs)) {
return false;
}
}
return true;
}
private Set<Integer> unionNodeSetWith(final Set<Integer> otherNodes) {
final Set<Integer> unionNodeSet = new HashSet<>(nodeStates.keySet());
unionNodeSet.addAll(otherNodes);
return unionNodeSet;
}
@Override
public String toString() { return toString(false); }
String toString(boolean verbose) {
StringBuilder sb = new StringBuilder();
int nodeCount = verbose ? getLogicalNodeCount() : upNodes.length();
if ( nodeCount > 0 ) {
sb.append(type == NodeType.DISTRIBUTOR ? " distributor:" : " storage:").append(nodeCount);
for (int i = 0; i < nodeCount; i++) {
String nodeState = getNodeState(i).serialize(i, verbose);
if (!nodeState.isEmpty()) {
sb.append(' ').append(nodeState);
}
}
}
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (! (obj instanceof Nodes)) return false;
Nodes b = (Nodes) obj;
if (logicalNodeCount != b.logicalNodeCount) return false;
if (type != b.type) return false;
if (!upNodes.equals(b.upNodes)) return false;
if (!nodeStates.equals(b.nodeStates)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(logicalNodeCount, type, nodeStates, upNodes);
}
private NodeState defaultDown() {
return type == NodeType.STORAGE
? DEFAULT_STORAGE_DOWN_NODE_STATE
: DEFAULT_DISTRIBUTOR_DOWN_NODE_STATE;
}
private NodeState defaultUp() {
return defaultUpNodeState(type);
}
} |
Updated comments. | public boolean equals(Object o) {
if (!(o instanceof NodeState)) { return false; }
NodeState ns = (NodeState) o;
if (state != ns.state
|| Math.abs(capacity - ns.capacity) > 0.0000000001
|| Math.abs(initProgress - ns.initProgress) > 0.0000000001
|| startTimestamp != ns.startTimestamp
|| minUsedBits != ns.minUsedBits)
{
return false;
}
return true;
} | public boolean equals(Object o) {
if (!(o instanceof NodeState)) { return false; }
NodeState ns = (NodeState) o;
if (state != ns.state
|| Math.abs(capacity - ns.capacity) > 0.0000000001
|| Math.abs(initProgress - ns.initProgress) > 0.0000000001
|| startTimestamp != ns.startTimestamp
|| minUsedBits != ns.minUsedBits)
{
return false;
}
return true;
} | class NodeState implements Cloneable {
public static final String ORCHESTRATOR_RESERVED_DESCRIPTION = "Orchestrator";
private final NodeType type;
private State state;
private String description = "";
private float capacity = 1.0f;
private float initProgress = 1.0f;
private int minUsedBits = 16;
private long startTimestamp = 0;
public static float getListingBucketsInitProgressLimit() { return 0.01f; }
public NodeState(NodeType type, State state) {
this.type = type;
this.state = state;
}
public NodeState clone() {
try{
return (NodeState) super.clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeException("Does not happen");
}
}
/**
* A state can not be forced to be in a state above it's reported state.
* For instance, a down being down, cannot be forced up, but a node being down can be forced in maintenance.
*/
public boolean above(NodeState other) {
return (state.ordinal() > other.state.ordinal());
}
public int hashCode() {
return state.hashCode() ^ Double.valueOf(capacity).hashCode();
}
/**
* States are similar if the cluster state doesn't need to be updated due to a change.
* Note that min dist bits may need to alter cluster state, but as we don't know at this point, we ignore it.
* Cluster state will check for that.
*/
public boolean similarTo(Object o) {
if (!(o instanceof NodeState)) {
return false;
}
return similarToImpl((NodeState)o, true);
}
public boolean similarToIgnoringInitProgress(final NodeState other) {
return similarToImpl(other, false);
}
private boolean similarToImpl(final NodeState other, boolean considerInitProgress) {
if (state != other.state) return false;
if (Math.abs(capacity - other.capacity) > 0.0000000001) return false;
if (startTimestamp != other.startTimestamp) return false;
if (considerInitProgress
&& type.equals(NodeType.STORAGE)
&& (initProgress < getListingBucketsInitProgressLimit()
^ other.initProgress < getListingBucketsInitProgressLimit()))
{
return false;
}
return true;
}
public Diff getDiff(NodeState other) {
Diff diff = new Diff();
if (!state.equals(other.state)) {
diff.add(new Diff.Entry("", state, other.state).bold());
}
if (Math.abs(capacity - other.capacity) > 0.000000001) {
diff.add(new Diff.Entry("capacity", capacity, other.capacity));
}
if (minUsedBits != other.minUsedBits) {
diff.add(new Diff.Entry("minUsedBits", minUsedBits, other.minUsedBits));
}
if (Math.abs(initProgress - other.initProgress) > 0.000000001 && state.equals(State.INITIALIZING) && other.state.equals(State.INITIALIZING)) {
diff.add(new Diff.Entry("initProgress", initProgress, other.initProgress));
}
if (startTimestamp != other.startTimestamp) {
diff.add(new Diff.Entry("startTimestamp", startTimestamp, other.startTimestamp));
}
if (!description.equals(other.description)) {
diff.add(new Diff.Entry("description", description, other.description));
}
return diff;
}
public String getTextualDifference(NodeState other) {
return getDiff(other).toString();
}
/** Capacity is set by deserializing a node state. This seems odd, as it is config */
public NodeState setCapacity(float c) { this.capacity = c; return this; }
public NodeState setInitProgress(float p) { this.initProgress = p; return this; }
public NodeState setDescription(String desc) { this.description = desc; return this; }
public NodeState setMinUsedBits(int u) { this.minUsedBits = u; return this; }
public NodeState setState(State state) { this.state = state; return this; }
public NodeState setStartTimestamp(long ts) { this.startTimestamp = ts; return this; }
public double getCapacity() { return this.capacity; }
public double getInitProgress() { return this.initProgress; }
public boolean hasDescription() { return (description.length() > 0); }
public String getDescription() { return description; }
public State getState() { return this.state; }
public int getMinUsedBits() { return minUsedBits; }
public long getStartTimestamp() { return startTimestamp; }
public String toString() { return toString(false); }
public String toString(boolean compact) {
StringBuilder sb = new StringBuilder();
if (compact) {
sb.append(state.serialize().toUpperCase());
} else {
sb.append(state);
}
if (Math.abs(capacity - 1.0) > 0.000000001) {
sb.append(compact ? ", c " : ", capacity ").append(compact ? String.format(Locale.ENGLISH, "%.3g", capacity) : capacity);
}
if (state.equals(State.INITIALIZING)) {
sb.append(compact ? ", i " : ", init progress ").append(compact ? String.format(Locale.ENGLISH, "%.3g", initProgress) : initProgress);
if (type.equals(NodeType.STORAGE)) {
if (initProgress < getListingBucketsInitProgressLimit()) {
sb.append(compact ? " (ls)" : " (listing files)");
} else {
sb.append(compact ? " (read)" : " (reading file headers)");
}
}
}
if (startTimestamp > 0) {
sb.append(compact ? ", t " : ", start timestamp ").append(startTimestamp);
}
if (minUsedBits != 16) {
sb.append(compact ? ", b " : ", minimum used bits ").append(minUsedBits);
}
if (description.length() > 0) {
sb.append(": ").append(description);
}
return sb.toString();
}
public String serialize() { return serialize(-1, false); }
public String serialize(boolean verbose) { return serialize(-1, verbose); }
public String serialize(int nodeIdx, boolean verbose) {
boolean empty = true;
StringBuilder sb = new StringBuilder();
String prefix = (nodeIdx == -1 ? "" : "." + nodeIdx + ".");
if (state != State.UP){
empty = false;
sb.append(prefix).append("s:").append(state.serialize());
}
if (Math.abs(capacity - 1.0) > 0.000000001) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("c:").append(capacity);
}
if (state == State.INITIALIZING) {
sb.append(' ');
sb.append(prefix).append("i:").append(initProgress);
}
if (startTimestamp != 0) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("t:").append(startTimestamp);
}
if (nodeIdx == -1 && minUsedBits != 16) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("b:").append(minUsedBits);
}
if ((verbose || nodeIdx == -1) && description.length() > 0) {
if (!empty) { sb.append(' '); }
sb.append(prefix).append("m:").append(StringUtilities.escape(description, ' '));
}
return sb.toString();
}
/** Creates an instance from the serialized form produced by serialize */
public static NodeState deserialize(NodeType type, String serialized) throws ParseException {
NodeState newState = new NodeState(type, State.UP);
StringTokenizer st = new StringTokenizer(serialized, " \t\r\f\n", false);
while (st.hasMoreTokens()) {
String token = st.nextToken();
int index = token.indexOf(':');
if (index < 0) {
throw new ParseException("Token " + token + " does not contain ':': " + serialized, 0);
}
String key = token.substring(0, index);
String value = token.substring(index + 1);
if (key.length() > 0) switch (key.charAt(0)) {
case 's':
if (key.length() > 1) break;
newState.setState(State.get(value));
continue;
case 'b':
if (key.length() > 1) break;
newState.setMinUsedBits(Integer.parseInt(value));
continue;
case 'c':
if (key.length() > 1) break;
if (type != null && !type.equals(NodeType.STORAGE)) break;
try{
newState.setCapacity(Float.valueOf(value));
} catch (Exception e) {
throw new ParseException("Illegal capacity '" + value + "'. Capacity must be a positive floating point number", 0);
}
continue;
case 'i':
if (key.length() > 1) break;
try{
newState.setInitProgress(Float.valueOf(value));
} catch (Exception e) {
throw new ParseException("Illegal init progress '" + value + "'. Init progress must be a floating point number from 0.0 to 1.0", 0);
}
continue;
case 't':
if (key.length() > 1) break;
try{
newState.setStartTimestamp(Long.valueOf(value));
if (newState.getStartTimestamp() < 0) throw new Exception();
} catch (Exception e) {
throw new ParseException("Illegal start timestamp " + value + ". Start timestamp must be 0 or a positive long.", 0);
}
continue;
case 'm':
if (key.length() > 1) break;
newState.setDescription(StringUtilities.unescape(value));
continue;
case 'd':
if (type != null && !type.equals(NodeType.STORAGE)) break;
int size = 0;
if (key.length() == 1) {
try{
size = Integer.valueOf(value);
} catch (Exception e) {
throw new ParseException("Invalid disk count '" + value + "'. Need a positive integer value", 0);
}
continue;
}
if (key.charAt(1) != '.') break;
int diskIndex;
int endp = key.indexOf('.', 2);
String indexStr = (endp < 0 ? key.substring(2) : key.substring(2, endp));
try{
diskIndex = Integer.valueOf(indexStr);
} catch (Exception e) {
throw new ParseException("Invalid disk index '" + indexStr + "'. need a positive integer value", 0);
}
if (diskIndex >= size) {
throw new ParseException("Cannot index disk " + diskIndex + " of " + size, 0);
}
continue;
default:
break;
}
}
return newState;
}
public void verifyValidInSystemState(NodeType type) {
if (!state.validCurrentNodeState(type)) {
throw new IllegalArgumentException("State " + state + " cannot fit in system state for node of type: " + type);
}
if (type.equals(NodeType.DISTRIBUTOR) && Math.abs(capacity - 1.0) > 0.000000001) {
throw new IllegalArgumentException("Capacity should not be set for a distributor node");
}
}
} | class NodeState implements Cloneable {
public static final String ORCHESTRATOR_RESERVED_DESCRIPTION = "Orchestrator";
private final NodeType type;
private State state;
private String description = "";
private float capacity = 1.0f;
private float initProgress = 1.0f;
private int minUsedBits = 16;
private long startTimestamp = 0;
public static float getListingBucketsInitProgressLimit() { return 0.01f; }
public NodeState(NodeType type, State state) {
this.type = type;
this.state = state;
}
public NodeState clone() {
try{
return (NodeState) super.clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeException("Does not happen");
}
}
/**
* A state can not be forced to be in a state above it's reported state.
* For instance, a down being down, cannot be forced up, but a node being down can be forced in maintenance.
*/
public boolean above(NodeState other) {
return (state.ordinal() > other.state.ordinal());
}
public int hashCode() {
return state.hashCode() ^ Double.valueOf(capacity).hashCode();
}
/**
* States are similar if the cluster state doesn't need to be updated due to a change.
* Note that min dist bits may need to alter cluster state, but as we don't know at this point, we ignore it.
* Cluster state will check for that.
*/
public boolean similarTo(Object o) {
if (!(o instanceof NodeState)) {
return false;
}
return similarToImpl((NodeState)o, true);
}
public boolean similarToIgnoringInitProgress(final NodeState other) {
return similarToImpl(other, false);
}
private boolean similarToImpl(final NodeState other, boolean considerInitProgress) {
if (state != other.state) return false;
if (Math.abs(capacity - other.capacity) > 0.0000000001) return false;
if (startTimestamp != other.startTimestamp) return false;
if (considerInitProgress
&& type.equals(NodeType.STORAGE)
&& (initProgress < getListingBucketsInitProgressLimit()
^ other.initProgress < getListingBucketsInitProgressLimit()))
{
return false;
}
return true;
}
public Diff getDiff(NodeState other) {
Diff diff = new Diff();
if (!state.equals(other.state)) {
diff.add(new Diff.Entry("", state, other.state).bold());
}
if (Math.abs(capacity - other.capacity) > 0.000000001) {
diff.add(new Diff.Entry("capacity", capacity, other.capacity));
}
if (minUsedBits != other.minUsedBits) {
diff.add(new Diff.Entry("minUsedBits", minUsedBits, other.minUsedBits));
}
if (Math.abs(initProgress - other.initProgress) > 0.000000001 && state.equals(State.INITIALIZING) && other.state.equals(State.INITIALIZING)) {
diff.add(new Diff.Entry("initProgress", initProgress, other.initProgress));
}
if (startTimestamp != other.startTimestamp) {
diff.add(new Diff.Entry("startTimestamp", startTimestamp, other.startTimestamp));
}
if (!description.equals(other.description)) {
diff.add(new Diff.Entry("description", description, other.description));
}
return diff;
}
public String getTextualDifference(NodeState other) {
return getDiff(other).toString();
}
/** Capacity is set by deserializing a node state. This seems odd, as it is config */
public NodeState setCapacity(float c) { this.capacity = c; return this; }
public NodeState setInitProgress(float p) { this.initProgress = p; return this; }
public NodeState setDescription(String desc) { this.description = desc; return this; }
public NodeState setMinUsedBits(int u) { this.minUsedBits = u; return this; }
public NodeState setState(State state) { this.state = state; return this; }
public NodeState setStartTimestamp(long ts) { this.startTimestamp = ts; return this; }
public double getCapacity() { return this.capacity; }
public double getInitProgress() { return this.initProgress; }
public boolean hasDescription() { return (description.length() > 0); }
public String getDescription() { return description; }
public State getState() { return this.state; }
public int getMinUsedBits() { return minUsedBits; }
public long getStartTimestamp() { return startTimestamp; }
public String toString() { return toString(false); }
public String toString(boolean compact) {
StringBuilder sb = new StringBuilder();
if (compact) {
sb.append(state.serialize().toUpperCase());
} else {
sb.append(state);
}
if (Math.abs(capacity - 1.0) > 0.000000001) {
sb.append(compact ? ", c " : ", capacity ").append(compact ? String.format(Locale.ENGLISH, "%.3g", capacity) : capacity);
}
if (state.equals(State.INITIALIZING)) {
sb.append(compact ? ", i " : ", init progress ").append(compact ? String.format(Locale.ENGLISH, "%.3g", initProgress) : initProgress);
if (type.equals(NodeType.STORAGE)) {
if (initProgress < getListingBucketsInitProgressLimit()) {
sb.append(compact ? " (ls)" : " (listing files)");
} else {
sb.append(compact ? " (read)" : " (reading file headers)");
}
}
}
if (startTimestamp > 0) {
sb.append(compact ? ", t " : ", start timestamp ").append(startTimestamp);
}
if (minUsedBits != 16) {
sb.append(compact ? ", b " : ", minimum used bits ").append(minUsedBits);
}
if (description.length() > 0) {
sb.append(": ").append(description);
}
return sb.toString();
}
public String serialize() { return serialize(-1, false); }
public String serialize(boolean verbose) { return serialize(-1, verbose); }
public String serialize(int nodeIdx, boolean verbose) {
boolean empty = true;
StringBuilder sb = new StringBuilder();
String prefix = (nodeIdx == -1 ? "" : "." + nodeIdx + ".");
if (state != State.UP){
empty = false;
sb.append(prefix).append("s:").append(state.serialize());
}
if (Math.abs(capacity - 1.0) > 0.000000001) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("c:").append(capacity);
}
if (state == State.INITIALIZING) {
sb.append(' ');
sb.append(prefix).append("i:").append(initProgress);
}
if (startTimestamp != 0) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("t:").append(startTimestamp);
}
if (nodeIdx == -1 && minUsedBits != 16) {
if (empty) { empty = false; } else { sb.append(' '); }
sb.append(prefix).append("b:").append(minUsedBits);
}
if ((verbose || nodeIdx == -1) && description.length() > 0) {
if (!empty) { sb.append(' '); }
sb.append(prefix).append("m:").append(StringUtilities.escape(description, ' '));
}
return sb.toString();
}
/** Creates an instance from the serialized form produced by serialize */
public static NodeState deserialize(NodeType type, String serialized) throws ParseException {
NodeState newState = new NodeState(type, State.UP);
StringTokenizer st = new StringTokenizer(serialized, " \t\r\f\n", false);
while (st.hasMoreTokens()) {
String token = st.nextToken();
int index = token.indexOf(':');
if (index < 0) {
throw new ParseException("Token " + token + " does not contain ':': " + serialized, 0);
}
String key = token.substring(0, index);
String value = token.substring(index + 1);
if (key.length() > 0) switch (key.charAt(0)) {
case 's':
if (key.length() > 1) break;
newState.setState(State.get(value));
continue;
case 'b':
if (key.length() > 1) break;
newState.setMinUsedBits(Integer.parseInt(value));
continue;
case 'c':
if (key.length() > 1) break;
if (type != null && !type.equals(NodeType.STORAGE)) break;
try{
newState.setCapacity(Float.valueOf(value));
} catch (Exception e) {
throw new ParseException("Illegal capacity '" + value + "'. Capacity must be a positive floating point number", 0);
}
continue;
case 'i':
if (key.length() > 1) break;
try{
newState.setInitProgress(Float.valueOf(value));
} catch (Exception e) {
throw new ParseException("Illegal init progress '" + value + "'. Init progress must be a floating point number from 0.0 to 1.0", 0);
}
continue;
case 't':
if (key.length() > 1) break;
try{
newState.setStartTimestamp(Long.valueOf(value));
if (newState.getStartTimestamp() < 0) throw new Exception();
} catch (Exception e) {
throw new ParseException("Illegal start timestamp " + value + ". Start timestamp must be 0 or a positive long.", 0);
}
continue;
case 'm':
if (key.length() > 1) break;
newState.setDescription(StringUtilities.unescape(value));
continue;
case 'd':
if (type != null && !type.equals(NodeType.STORAGE)) break;
int size = 0;
if (key.length() == 1) {
try{
size = Integer.valueOf(value);
} catch (Exception e) {
throw new ParseException("Invalid disk count '" + value + "'. Need a positive integer value", 0);
}
continue;
}
if (key.charAt(1) != '.') break;
int diskIndex;
int endp = key.indexOf('.', 2);
String indexStr = (endp < 0 ? key.substring(2) : key.substring(2, endp));
try{
diskIndex = Integer.valueOf(indexStr);
} catch (Exception e) {
throw new ParseException("Invalid disk index '" + indexStr + "'. need a positive integer value", 0);
}
if (diskIndex >= size) {
throw new ParseException("Cannot index disk " + diskIndex + " of " + size, 0);
}
continue;
default:
break;
}
}
return newState;
}
public void verifyValidInSystemState(NodeType type) {
if (!state.validCurrentNodeState(type)) {
throw new IllegalArgumentException("State " + state + " cannot fit in system state for node of type: " + type);
}
if (type.equals(NodeType.DISTRIBUTOR) && Math.abs(capacity - 1.0) > 0.000000001) {
throw new IllegalArgumentException("Capacity should not be set for a distributor node");
}
}
} | |
This breaks down if someone does getNodeState(node_that_is_up).modify_some_important_field. | NodeState getNodeState(int index) {
NodeState ns = nodeStates.get(index);
if (ns != null) return ns;
return (index >= getMaxIndex() || ! upNodes.get(index))
? new NodeState(type, State.DOWN)
: new NodeState(type, State.UP);
} | : new NodeState(type, State.UP); | NodeState getNodeState(int index) {
NodeState ns = nodeStates.get(index);
if (ns != null) return ns;
return (index >= getLogicalNodeCount() || ! upNodes.get(index))
? new NodeState(type, State.DOWN)
: new NodeState(type, State.UP);
} | class Nodes {
private int maxIndex;
private final NodeType type;
private final BitSet upNodes;
private final Map<Integer, NodeState> nodeStates = new HashMap<>();
Nodes(NodeType type) {
this.type = type;
upNodes = new BitSet();
}
Nodes(Nodes b) {
maxIndex = b.maxIndex;
type = b.type;
upNodes = (BitSet) b.upNodes.clone();
b.nodeStates.forEach((key, value) -> nodeStates.put(key, value.clone()));
}
void updateMaxIndex(int index) {
if (index > maxIndex) {
upNodes.set(maxIndex, index);
maxIndex = index;
}
}
int getMaxIndex() { return maxIndex; }
private void validateInput(Node node, NodeState ns) {
ns.verifyValidInSystemState(node.getType());
if (node.getType() != type) {
throw new IllegalArgumentException("NodeType '" + node.getType() + "' differs from '" + type + "'");
}
}
void setNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
if (index >= maxIndex) {
maxIndex = index + 1;
}
setNodeStateInternal(index, ns);
}
void addNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
updateMaxIndex(index + 1);
setNodeStateInternal(index, ns);
}
private static boolean equalsWithDescription(NodeState a, NodeState b) {
return a.equals(b) && ((a.getState() != State.DOWN) || a.getDescription().equals(b.getDescription()));
}
private void setNodeStateInternal(int index, NodeState ns) {
nodeStates.remove(index);
if (ns.getState() == State.DOWN) {
upNodes.clear(index);
if ( ! equalsWithDescription(defaultDown(), ns)) {
nodeStates.put(index, ns);
}
} else {
upNodes.set(index);
if ( ! equalsWithDescription(defaultUp(), ns)) {
nodeStates.put(index, ns);
}
}
}
boolean similarToImpl(Nodes other, final NodeStateCmp nodeStateCmp) {
if (maxIndex != other.maxIndex) return false;
if (type != other.type) return false;
if ( ! upNodes.equals(other.upNodes)) return false;
for (Integer node : unionNodeSetWith(other.nodeStates.keySet())) {
final NodeState lhs = nodeStates.get(node);
final NodeState rhs = other.nodeStates.get(node);
if (!nodeStateCmp.similar(type, lhs, rhs)) {
return false;
}
}
return true;
}
private Set<Integer> unionNodeSetWith(final Set<Integer> otherNodes) {
final Set<Integer> unionNodeSet = new HashSet<>(nodeStates.keySet());
unionNodeSet.addAll(otherNodes);
return unionNodeSet;
}
@Override
public String toString() { return toString(false); }
String toString(boolean verbose) {
StringBuilder sb = new StringBuilder();
int nodeCount = verbose ? getMaxIndex() : upNodes.length();
if ( nodeCount > 0 ) {
sb.append(type == NodeType.DISTRIBUTOR ? " distributor:" : " storage:").append(nodeCount);
for (int i = 0; i < nodeCount; i++) {
String nodeState = getNodeState(i).serialize(i, verbose);
if (!nodeState.isEmpty()) {
sb.append(' ').append(nodeState);
}
}
}
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (! (obj instanceof Nodes)) return false;
Nodes b = (Nodes) obj;
if (maxIndex != b.maxIndex) return false;
if (type != b.type) return false;
if (!upNodes.equals(b.upNodes)) return false;
if (!nodeStates.equals(b.nodeStates)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(maxIndex, type, nodeStates, upNodes);
}
private NodeState defaultDown() {
return type == NodeType.STORAGE
? DEFAULT_STORAGE_DOWN_NODE_STATE
: DEFAULT_DISTRIBUTOR_DOWN_NODE_STATE;
}
private NodeState defaultUp() {
return defaultUpNodeState(type);
}
} | class Nodes {
private int logicalNodeCount;
private final NodeType type;
private final BitSet upNodes;
private final Map<Integer, NodeState> nodeStates = new HashMap<>();
Nodes(NodeType type) {
this.type = type;
upNodes = new BitSet();
}
Nodes(Nodes b) {
logicalNodeCount = b.logicalNodeCount;
type = b.type;
upNodes = (BitSet) b.upNodes.clone();
b.nodeStates.forEach((key, value) -> nodeStates.put(key, value.clone()));
}
void updateMaxIndex(int index) {
if (index > logicalNodeCount) {
upNodes.set(logicalNodeCount, index);
logicalNodeCount = index;
}
}
int getLogicalNodeCount() { return logicalNodeCount; }
private void validateInput(Node node, NodeState ns) {
ns.verifyValidInSystemState(node.getType());
if (node.getType() != type) {
throw new IllegalArgumentException("NodeType '" + node.getType() + "' differs from '" + type + "'");
}
}
void setNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
if (index >= logicalNodeCount) {
logicalNodeCount = index + 1;
}
setNodeStateInternal(index, ns);
}
void addNodeState(Node node, NodeState ns) {
validateInput(node, ns);
int index = node.getIndex();
updateMaxIndex(index + 1);
setNodeStateInternal(index, ns);
}
private static boolean equalsWithDescription(NodeState a, NodeState b) {
return a.equals(b) && ((a.getState() != State.DOWN) || a.getDescription().equals(b.getDescription()));
}
private void setNodeStateInternal(int index, NodeState ns) {
nodeStates.remove(index);
if (ns.getState() == State.DOWN) {
upNodes.clear(index);
if ( ! equalsWithDescription(defaultDown(), ns)) {
nodeStates.put(index, ns);
}
} else {
upNodes.set(index);
if ( ! equalsWithDescription(defaultUp(), ns)) {
nodeStates.put(index, ns);
}
}
}
boolean similarToImpl(Nodes other, final NodeStateCmp nodeStateCmp) {
if (logicalNodeCount != other.logicalNodeCount) return false;
if (type != other.type) return false;
if ( ! upNodes.equals(other.upNodes)) return false;
for (Integer node : unionNodeSetWith(other.nodeStates.keySet())) {
final NodeState lhs = nodeStates.get(node);
final NodeState rhs = other.nodeStates.get(node);
if (!nodeStateCmp.similar(type, lhs, rhs)) {
return false;
}
}
return true;
}
private Set<Integer> unionNodeSetWith(final Set<Integer> otherNodes) {
final Set<Integer> unionNodeSet = new HashSet<>(nodeStates.keySet());
unionNodeSet.addAll(otherNodes);
return unionNodeSet;
}
@Override
public String toString() { return toString(false); }
String toString(boolean verbose) {
StringBuilder sb = new StringBuilder();
int nodeCount = verbose ? getLogicalNodeCount() : upNodes.length();
if ( nodeCount > 0 ) {
sb.append(type == NodeType.DISTRIBUTOR ? " distributor:" : " storage:").append(nodeCount);
for (int i = 0; i < nodeCount; i++) {
String nodeState = getNodeState(i).serialize(i, verbose);
if (!nodeState.isEmpty()) {
sb.append(' ').append(nodeState);
}
}
}
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (! (obj instanceof Nodes)) return false;
Nodes b = (Nodes) obj;
if (logicalNodeCount != b.logicalNodeCount) return false;
if (type != b.type) return false;
if (!upNodes.equals(b.upNodes)) return false;
if (!nodeStates.equals(b.nodeStates)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(logicalNodeCount, type, nodeStates, upNodes);
}
private NodeState defaultDown() {
return type == NodeType.STORAGE
? DEFAULT_STORAGE_DOWN_NODE_STATE
: DEFAULT_DISTRIBUTOR_DOWN_NODE_STATE;
}
private NodeState defaultUp() {
return defaultUpNodeState(type);
}
} |
Can't let upgrade be blocked indefinitely by deploying once a day. | protected double maintain() {
AtomicInteger attempts = new AtomicInteger();
AtomicInteger failures = new AtomicInteger();
Version systemVersion = controller().readSystemVersion();
for (Application application : controller().applications().readable())
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values())
try {
attempts.incrementAndGet();
JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get());
if ( ! deployment.zone().environment().isManuallyDeployed()) continue;
Run last = controller().jobController().last(job).get();
Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.of(last.versions().targetPlatform()), Optional.of(last.versions().targetApplication()));
if ( ! deployment.version().isBefore(target.targetPlatform())) continue;
if ( ! isLikelyNightFor(job)) continue;
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
controller().jobController().start(instance.id(), JobType.from(controller().system(), deployment.zone()).get(), target, true);
} catch (Exception e) {
failures.incrementAndGet();
log.log(Level.WARNING, "Failed upgrading " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
return asSuccessFactor(attempts.get(), failures.get());
} | if ( ! isLikelyNightFor(job)) continue; | protected double maintain() {
AtomicInteger attempts = new AtomicInteger();
AtomicInteger failures = new AtomicInteger();
Version systemVersion = controller().readSystemVersion();
for (Application application : controller().applications().readable())
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values())
try {
attempts.incrementAndGet();
JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get());
if ( ! deployment.zone().environment().isManuallyDeployed()) continue;
Run last = controller().jobController().last(job).get();
Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.of(last.versions().targetPlatform()), Optional.of(last.versions().targetApplication()));
if ( ! deployment.version().isBefore(target.targetPlatform())) continue;
if ( ! isLikelyNightFor(job)) continue;
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
controller().jobController().start(instance.id(), JobType.from(controller().system(), deployment.zone()).get(), target, true);
} catch (Exception e) {
failures.incrementAndGet();
log.log(Level.WARNING, "Failed upgrading " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
return asSuccessFactor(attempts.get(), failures.get());
} | class DeploymentUpgrader extends ControllerMaintainer {
public DeploymentUpgrader(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
private boolean isLikelyNightFor(JobId job) {
int hour = hourOf(controller().clock().instant());
int[] runStarts = controller().jobController().jobStarts(job).stream()
.mapToInt(DeploymentUpgrader::hourOf)
.toArray();
int localNight = mostLikelyWeeHour(runStarts);
return Math.abs(hour - localNight) <= 1;
}
static int mostLikelyWeeHour(int[] starts) {
double weight = 1;
double[] buckets = new double[24];
for (int start : starts)
buckets[start] += weight *= 0.8;
int best = -1;
double min = Double.MAX_VALUE;
for (int i = 12; i < 36; i++) {
double sum = 0;
for (int j = -12; j < 12; j++)
sum += buckets[(i + j) % 24] / (Math.abs(j) + 1);
if (sum < min) {
min = sum;
best = i;
}
}
return (best + 2) % 24;
}
private static int hourOf(Instant instant) {
return (int) (instant.toEpochMilli() / 3_600_000 % 24);
}
} | class DeploymentUpgrader extends ControllerMaintainer {
public DeploymentUpgrader(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
private boolean isLikelyNightFor(JobId job) {
int hour = hourOf(controller().clock().instant());
int[] runStarts = controller().jobController().jobStarts(job).stream()
.mapToInt(DeploymentUpgrader::hourOf)
.toArray();
int localNight = mostLikelyWeeHour(runStarts);
return Math.abs(hour - localNight) <= 1;
}
static int mostLikelyWeeHour(int[] starts) {
double weight = 1;
double[] buckets = new double[24];
for (int start : starts)
buckets[start] += weight *= 0.8;
int best = -1;
double min = Double.MAX_VALUE;
for (int i = 12; i < 36; i++) {
double sum = 0;
for (int j = -12; j < 12; j++)
sum += buckets[(i + j) % 24] / (Math.abs(j) + 1);
if (sum < min) {
min = sum;
best = i;
}
}
return (best + 2) % 24;
}
private static int hourOf(Instant instant) {
return (int) (instant.toEpochMilli() / 3_600_000 % 24);
}
} |
I guess someone could theoretically block upgrade by redeploying once an hour, or so, but until someone actually does this, let's leave this. The remedy for that would be to always upgrade if the version is more than, say, 7 days old. Then again, that someone could deploy with a particular runtime version once an hour ... and this is already possible today, so I don't think it's actually a problem. | protected double maintain() {
AtomicInteger attempts = new AtomicInteger();
AtomicInteger failures = new AtomicInteger();
Version systemVersion = controller().readSystemVersion();
for (Application application : controller().applications().readable())
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values())
try {
attempts.incrementAndGet();
JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get());
if ( ! deployment.zone().environment().isManuallyDeployed()) continue;
Run last = controller().jobController().last(job).get();
Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.of(last.versions().targetPlatform()), Optional.of(last.versions().targetApplication()));
if ( ! deployment.version().isBefore(target.targetPlatform())) continue;
if ( ! isLikelyNightFor(job)) continue;
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
controller().jobController().start(instance.id(), JobType.from(controller().system(), deployment.zone()).get(), target, true);
} catch (Exception e) {
failures.incrementAndGet();
log.log(Level.WARNING, "Failed upgrading " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
return asSuccessFactor(attempts.get(), failures.get());
} | if ( ! isLikelyNightFor(job)) continue; | protected double maintain() {
AtomicInteger attempts = new AtomicInteger();
AtomicInteger failures = new AtomicInteger();
Version systemVersion = controller().readSystemVersion();
for (Application application : controller().applications().readable())
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values())
try {
attempts.incrementAndGet();
JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get());
if ( ! deployment.zone().environment().isManuallyDeployed()) continue;
Run last = controller().jobController().last(job).get();
Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.of(last.versions().targetPlatform()), Optional.of(last.versions().targetApplication()));
if ( ! deployment.version().isBefore(target.targetPlatform())) continue;
if ( ! isLikelyNightFor(job)) continue;
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
controller().jobController().start(instance.id(), JobType.from(controller().system(), deployment.zone()).get(), target, true);
} catch (Exception e) {
failures.incrementAndGet();
log.log(Level.WARNING, "Failed upgrading " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
return asSuccessFactor(attempts.get(), failures.get());
} | class DeploymentUpgrader extends ControllerMaintainer {
public DeploymentUpgrader(Controller controller, Duration interval) {
super(controller, interval);
}
@Override
private boolean isLikelyNightFor(JobId job) {
int hour = hourOf(controller().clock().instant());
int[] runStarts = controller().jobController().jobStarts(job).stream()
.mapToInt(DeploymentUpgrader::hourOf)
.toArray();
int localNight = mostLikelyWeeHour(runStarts);
return Math.abs(hour - localNight) <= 1;
}
static int mostLikelyWeeHour(int[] starts) {
double weight = 1;
double[] buckets = new double[24];
for (int start : starts)
buckets[start] += weight *= 0.8;
int best = -1;
double min = Double.MAX_VALUE;
for (int i = 12; i < 36; i++) {
double sum = 0;
for (int j = -12; j < 12; j++)
sum += buckets[(i + j) % 24] / (Math.abs(j) + 1);
if (sum < min) {
min = sum;
best = i;
}
}
return (best + 2) % 24;
}
private static int hourOf(Instant instant) {
return (int) (instant.toEpochMilli() / 3_600_000 % 24);
}
} | class DeploymentUpgrader extends ControllerMaintainer {
public DeploymentUpgrader(Controller controller, Duration interval) {
super(controller, interval);
}
/**
 * Returns whether the current time is within one hour of this job's estimated
 * quietest ("wee") hour of day, computed from the hours of its previous run starts.
 *
 * Fixed: removed a stray {@code @Override} annotation — this method is private and
 * overrides nothing, so javac rejects the annotation outright.
 */
private boolean isLikelyNightFor(JobId job) {
    int hour = hourOf(controller().clock().instant());
    int[] runStarts = controller().jobController().jobStarts(job).stream()
                                  .mapToInt(DeploymentUpgrader::hourOf)
                                  .toArray();
    int localNight = mostLikelyWeeHour(runStarts);
    // Anything within +/- one hour of the estimated wee hour counts as "night".
    return Math.abs(hour - localNight) <= 1;
}
/**
 * Estimates a nightly "wee hour" (0-23) from the hours at which previous runs started.
 * Each start adds an exponentially decaying weight to its hour bucket (earlier
 * elements count more — presumably the list is most recent first; TODO confirm),
 * each candidate hour is scored over a distance-tapered 24-hour window, and the
 * lowest-scoring hour plus two is returned.
 *
 * @param starts hours of day (0-23) of previous run starts
 * @return the estimated wee hour, in [0, 23]
 */
static int mostLikelyWeeHour(int[] starts) {
double weight = 1;
double[] buckets = new double[24];
for (int start : starts)
buckets[start] += weight *= 0.8; // weight is multiplied before use: first start weighs 0.8
int best = -1;
double min = Double.MAX_VALUE;
for (int i = 12; i < 36; i++) { // centers shifted by 12 so (i + j) stays non-negative
double sum = 0;
for (int j = -12; j < 12; j++)
sum += buckets[(i + j) % 24] / (Math.abs(j) + 1); // nearer hours count more
if (sum < min) { // strict '<' keeps the earliest center on ties
min = sum;
best = i;
}
}
return (best + 2) % 24; // shift two hours past the quietest center
}
/** Returns the hour of day (0-23, UTC) of the given instant: whole hours since epoch, modulo 24. */
private static int hourOf(Instant instant) {
return (int) (instant.toEpochMilli() / 3_600_000 % 24);
}
} |
```suggestion new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider).initService(deployLogger); ``` | private StorageNode buildSingleNode(DeployState deployState, ContentCluster parent) {
// Single implicit node: fixed distribution key 0.
int distributionKey = 0;
StorageNode sNode = new StorageNode(deployState.getProperties(), parent.getStorageCluster(), 1.0, distributionKey , false);
// Place the node on the single-node container host and initialize it.
sNode.setHostResource(parent.hostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC));
sNode.initService(deployLogger);
PersistenceEngine provider = parent.getPersistence().create(deployState, sNode, storageGroup, null);
// NOTE(review): the Distributor below is constructed for its side effects only; unlike the
// storage node it is neither given a host resource nor initService()'d — confirm intended.
new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider);
return sNode;
} | new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider); | private StorageNode buildSingleNode(DeployState deployState, ContentCluster parent) {
// Single implicit node: fixed distribution key 0.
int distributionKey = 0;
StorageNode searchNode = new StorageNode(deployState.getProperties(), parent.getStorageCluster(), 1.0, distributionKey , false);
// Place the node on the single-node container host.
searchNode.setHostResource(parent.hostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC));
PersistenceEngine provider = parent.getPersistence().create(deployState, searchNode, storageGroup, null);
searchNode.initService(deployLogger);
// The distributor is co-located with the storage node and initialized the same way.
Distributor distributor = new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider);
distributor.setHostResource(searchNode.getHostResource());
distributor.initService(deployLogger);
return searchNode;
} | class GroupBuilder {
private final StorageGroup storageGroup;
/* The explicitly defined subgroups of this */
private final List<GroupBuilder> subGroups;
private final List<XmlNodeBuilder> nodeBuilders;
/** The nodes explicitly specified as a nodes tag in this group, or empty if none */
private final Optional<NodesSpecification> nodeRequirement;
private final DeployLogger deployLogger;
private GroupBuilder(StorageGroup storageGroup, List<GroupBuilder> subGroups, List<XmlNodeBuilder> nodeBuilders,
Optional<NodesSpecification> nodeRequirement, DeployLogger deployLogger) {
this.storageGroup = storageGroup;
this.subGroups = subGroups;
this.nodeBuilders = nodeBuilders;
this.nodeRequirement = nodeRequirement;
this.deployLogger = deployLogger;
}
/**
* Builds a storage group for a nonhosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildNonHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildNonHosted(deployState, owner, Optional.of(this)));
}
for (XmlNodeBuilder nodeBuilder : nodeBuilders) {
storageGroup.nodes.add(nodeBuilder.build(deployState, owner, storageGroup));
}
if (parent.isEmpty() && subGroups.isEmpty() && nodeBuilders.isEmpty()) {
storageGroup.nodes.add(buildSingleNode(deployState, owner));
}
return storageGroup;
}
/**
* Builds a storage group for a hosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
if (storageGroup.getIndex() != null)
throw new IllegalArgumentException("Specifying individual groups is not supported on hosted applications");
Map<HostResource, ClusterMembership> hostMapping =
nodeRequirement.isPresent() ?
provisionHosts(nodeRequirement.get(), owner.getStorageCluster().getClusterName(), owner.getRoot().hostSystem(), deployLogger) :
Collections.emptyMap();
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroups = collectAllocatedSubgroups(hostMapping);
if (hostGroups.size() > 1) {
if (parent.isPresent())
throw new IllegalArgumentException("Cannot specify groups using the groups attribute in nested content groups");
for (Map.Entry<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroup : hostGroups.entrySet()) {
String groupIndex = String.valueOf(hostGroup.getKey().get().index());
StorageGroup subgroup = new StorageGroup(true, groupIndex, groupIndex);
for (Map.Entry<HostResource, ClusterMembership> host : hostGroup.getValue().entrySet()) {
subgroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), subgroup, host.getValue()));
}
storageGroup.subgroups.add(subgroup);
}
}
else {
for (Map.Entry<HostResource, ClusterMembership> host : hostMapping.entrySet()) {
storageGroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), storageGroup, host.getValue()));
}
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildHosted(deployState, owner, Optional.of(this)));
}
}
return storageGroup;
}
/**
 * Buckets the given host-to-membership mapping by the cluster group each membership
 * belongs to, preserving encounter order of both groups and hosts (LinkedHashMap).
 *
 * @param hostMapping the allocated hosts and their cluster memberships
 * @return hosts and memberships keyed by their (possibly absent) cluster group
 */
private Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> collectAllocatedSubgroups(Map<HostResource, ClusterMembership> hostMapping) {
    Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostsPerGroup = new LinkedHashMap<>();
    for (Map.Entry<HostResource, ClusterMembership> entry : hostMapping.entrySet()) {
        Optional<ClusterSpec.Group> group = entry.getValue().cluster().group();
        // computeIfAbsent replaces the manual get-then-put-if-null dance; ordering is unchanged.
        hostsPerGroup.computeIfAbsent(group, ignored -> new LinkedHashMap<>())
                     .put(entry.getKey(), entry.getValue());
    }
    return hostsPerGroup;
}
} | class GroupBuilder {
private final StorageGroup storageGroup;
/* The explicitly defined subgroups of this */
private final List<GroupBuilder> subGroups;
private final List<XmlNodeBuilder> nodeBuilders;
/** The nodes explicitly specified as a nodes tag in this group, or empty if none */
private final Optional<NodesSpecification> nodeRequirement;
private final DeployLogger deployLogger;
private GroupBuilder(StorageGroup storageGroup, List<GroupBuilder> subGroups, List<XmlNodeBuilder> nodeBuilders,
Optional<NodesSpecification> nodeRequirement, DeployLogger deployLogger) {
this.storageGroup = storageGroup;
this.subGroups = subGroups;
this.nodeBuilders = nodeBuilders;
this.nodeRequirement = nodeRequirement;
this.deployLogger = deployLogger;
}
/**
* Builds a storage group for a nonhosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildNonHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildNonHosted(deployState, owner, Optional.of(this)));
}
for (XmlNodeBuilder nodeBuilder : nodeBuilders) {
storageGroup.nodes.add(nodeBuilder.build(deployState, owner, storageGroup));
}
if (parent.isEmpty() && subGroups.isEmpty() && nodeBuilders.isEmpty()) {
storageGroup.nodes.add(buildSingleNode(deployState, owner));
}
return storageGroup;
}
/**
* Builds a storage group for a hosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
if (storageGroup.getIndex() != null)
throw new IllegalArgumentException("Specifying individual groups is not supported on hosted applications");
Map<HostResource, ClusterMembership> hostMapping =
nodeRequirement.isPresent() ?
provisionHosts(nodeRequirement.get(), owner.getStorageCluster().getClusterName(), owner.getRoot().hostSystem(), deployLogger) :
Collections.emptyMap();
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroups = collectAllocatedSubgroups(hostMapping);
if (hostGroups.size() > 1) {
if (parent.isPresent())
throw new IllegalArgumentException("Cannot specify groups using the groups attribute in nested content groups");
for (Map.Entry<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroup : hostGroups.entrySet()) {
String groupIndex = String.valueOf(hostGroup.getKey().get().index());
StorageGroup subgroup = new StorageGroup(true, groupIndex, groupIndex);
for (Map.Entry<HostResource, ClusterMembership> host : hostGroup.getValue().entrySet()) {
subgroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), subgroup, host.getValue()));
}
storageGroup.subgroups.add(subgroup);
}
}
else {
for (Map.Entry<HostResource, ClusterMembership> host : hostMapping.entrySet()) {
storageGroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), storageGroup, host.getValue()));
}
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildHosted(deployState, owner, Optional.of(this)));
}
}
return storageGroup;
}
/**
 * Buckets the given host-to-membership mapping by cluster group, preserving
 * encounter order of both groups and hosts (LinkedHashMap).
 *
 * @param hostMapping the allocated hosts and their cluster memberships
 * @return hosts and memberships keyed by their (possibly absent) cluster group
 */
private Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> collectAllocatedSubgroups(Map<HostResource, ClusterMembership> hostMapping) {
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostsPerGroup = new LinkedHashMap<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hostMapping.entrySet()) {
Optional<ClusterSpec.Group> group = entry.getValue().cluster().group();
Map<HostResource, ClusterMembership> hostsInGroup = hostsPerGroup.get(group);
if (hostsInGroup == null) { // first host seen for this group: create its bucket
hostsInGroup = new LinkedHashMap<>();
hostsPerGroup.put(group, hostsInGroup);
}
hostsInGroup.put(entry.getKey(), entry.getValue());
}
return hostsPerGroup;
}
} |
```suggestion RPCService service = cache.computeIfAbsent(pattern, key -> RPCService.create(net.getMirror(), key)); ``` | public RPCServiceAddress resolve(String pattern) {
ServiceLRUCache cache = getPerThreadCache(); // per-thread cache, so no locking needed
RPCService service = cache.get(pattern);
if (service == null) { // cache miss: create and remember the service for this pattern
service = RPCService.create(net.getMirror(), pattern);
cache.put(pattern, service);
}
return service.resolve();
} | } | public RPCServiceAddress resolve(String pattern) {
return getPerThreadCache().computeIfAbsent(pattern, (key) -> RPCService.create(net.getMirror(), key)).resolve();
} | class RPCServicePool {
private final RPCNetwork net;
private final Map<Long, ServiceLRUCache> mapOfServiceCache;
private final int maxSize;
/**
* Create a new service pool for the given network.
*
* @param net The underlying RPC network.
* @param maxSize The max number of services to cache.
*/
public RPCServicePool(RPCNetwork net, int maxSize) {
this.net = net;
mapOfServiceCache = new CopyOnWriteHashMap<>();
this.maxSize = maxSize;
}
/**
* Returns the RPCServiceAddress that corresponds to a given pattern. This reuses the RPCService object for matching
* pattern so that load balancing is possible on the network level.
*
* @param pattern The pattern for the service we require.
* @return A service address for the given pattern.
*/
private ServiceLRUCache getPerThreadCache() {
return mapOfServiceCache.computeIfAbsent(Thread.currentThread().getId(), (key) -> new ServiceLRUCache(maxSize));
}
/**
* Returns the number of services available in the pool. This number will never exceed the limit given at
* construction time.
*
* @return The current size of this pool.
*/
public int getSize() {
return getPerThreadCache().size();
}
/**
* Returns whether or not there is a service available in the pool the corresponds to the given pattern.
*
* @param pattern The pattern to check for.
* @return True if a corresponding service is in the pool.
*/
public boolean hasService(String pattern) {
return getPerThreadCache().containsKey(pattern);
}
private static class ServiceLRUCache extends LinkedHashMap<String, RPCService> {
private final int maxSize;
ServiceLRUCache(int maxSize) {
super(16, 0.75f, true);
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<String, RPCService> entry) {
return size() > maxSize;
}
}
} | class RPCServicePool {
private final RPCNetwork net;
private final Map<Long, ServiceLRUCache> mapOfServiceCache;
private final int maxSize;
/**
* Create a new service pool for the given network.
*
* @param net The underlying RPC network.
* @param maxSize The max number of services to cache.
*/
public RPCServicePool(RPCNetwork net, int maxSize) {
this.net = net;
mapOfServiceCache = new CopyOnWriteHashMap<>();
this.maxSize = maxSize;
}
/**
* Returns the RPCServiceAddress that corresponds to a given pattern. This reuses the RPCService object for matching
* pattern so that load balancing is possible on the network level.
*
* @param pattern The pattern for the service we require.
* @return A service address for the given pattern.
*/
private ServiceLRUCache getPerThreadCache() {
return mapOfServiceCache.computeIfAbsent(Thread.currentThread().getId(), (key) -> new ServiceLRUCache(maxSize));
}
/**
* Returns the number of services available in the pool. This number will never exceed the limit given at
* construction time.
*
* @return The current size of this pool.
*/
public int getSize() {
return getPerThreadCache().size();
}
/**
* Returns whether or not there is a service available in the pool the corresponds to the given pattern.
*
* @param pattern The pattern to check for.
* @return True if a corresponding service is in the pool.
*/
public boolean hasService(String pattern) {
return getPerThreadCache().containsKey(pattern);
}
private static class ServiceLRUCache extends LinkedHashMap<String, RPCService> {
private final int maxSize;
ServiceLRUCache(int maxSize) {
super(16, 0.75f, true);
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<String, RPCService> entry) {
return size() > maxSize;
}
}
} |
Huh, that diff makes no sense :) | public RPCServiceAddress resolve(String pattern) {
ServiceLRUCache cache = getPerThreadCache(); // per-thread cache, so no locking needed
RPCService service = cache.get(pattern);
if (service == null) { // cache miss: create and remember the service for this pattern
service = RPCService.create(net.getMirror(), pattern);
cache.put(pattern, service);
}
return service.resolve();
} | } | public RPCServiceAddress resolve(String pattern) {
return getPerThreadCache().computeIfAbsent(pattern, (key) -> RPCService.create(net.getMirror(), key)).resolve();
} | class RPCServicePool {
private final RPCNetwork net; // network used to create and resolve services
private final Map<Long, ServiceLRUCache> mapOfServiceCache; // one LRU cache per thread id
private final int maxSize; // capacity of each per-thread cache
/**
 * Create a new service pool for the given network.
 *
 * @param net The underlying RPC network.
 * @param maxSize The max number of services to cache.
 */
public RPCServicePool(RPCNetwork net, int maxSize) {
this.net = net;
mapOfServiceCache = new CopyOnWriteHashMap<>();
this.maxSize = maxSize;
}
/**
 * Returns the service cache belonging to the calling thread, creating it on first use.
 * (The previous javadoc here described resolve(String) — it documented a 'pattern'
 * parameter this method does not have.)
 *
 * @return the calling thread's LRU service cache
 */
private ServiceLRUCache getPerThreadCache() {
return mapOfServiceCache.computeIfAbsent(Thread.currentThread().getId(), (key) -> new ServiceLRUCache(maxSize));
}
/**
 * Returns the number of services available in the pool. This number will never exceed the limit given at
 * construction time. Note that this reports the size of the calling thread's cache only.
 *
 * @return The current size of this pool.
 */
public int getSize() {
return getPerThreadCache().size();
}
/**
 * Returns whether there is a service available in the calling thread's pool that
 * corresponds to the given pattern.
 *
 * @param pattern The pattern to check for.
 * @return True if a corresponding service is in the pool.
 */
public boolean hasService(String pattern) {
return getPerThreadCache().containsKey(pattern);
}
/**
 * A bounded LRU cache of services keyed by pattern. The three-argument super call
 * enables access order (third argument true), so iteration runs least- to
 * most-recently used and removeEldestEntry evicts the least recently used entry
 * once the size exceeds maxSize.
 */
private static class ServiceLRUCache extends LinkedHashMap<String, RPCService> {
private final int maxSize; // eviction threshold for this cache
ServiceLRUCache(int maxSize) {
super(16, 0.75f, true);
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<String, RPCService> entry) {
return size() > maxSize;
}
}
} | class RPCServicePool {
private final RPCNetwork net;
private final Map<Long, ServiceLRUCache> mapOfServiceCache;
private final int maxSize;
/**
* Create a new service pool for the given network.
*
* @param net The underlying RPC network.
* @param maxSize The max number of services to cache.
*/
public RPCServicePool(RPCNetwork net, int maxSize) {
this.net = net;
mapOfServiceCache = new CopyOnWriteHashMap<>();
this.maxSize = maxSize;
}
/**
* Returns the RPCServiceAddress that corresponds to a given pattern. This reuses the RPCService object for matching
* pattern so that load balancing is possible on the network level.
*
* @param pattern The pattern for the service we require.
* @return A service address for the given pattern.
*/
private ServiceLRUCache getPerThreadCache() {
return mapOfServiceCache.computeIfAbsent(Thread.currentThread().getId(), (key) -> new ServiceLRUCache(maxSize));
}
/**
* Returns the number of services available in the pool. This number will never exceed the limit given at
* construction time.
*
* @return The current size of this pool.
*/
public int getSize() {
return getPerThreadCache().size();
}
/**
* Returns whether or not there is a service available in the pool the corresponds to the given pattern.
*
* @param pattern The pattern to check for.
* @return True if a corresponding service is in the pool.
*/
public boolean hasService(String pattern) {
return getPerThreadCache().containsKey(pattern);
}
private static class ServiceLRUCache extends LinkedHashMap<String, RPCService> {
private final int maxSize;
ServiceLRUCache(int maxSize) {
super(16, 0.75f, true);
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<String, RPCService> entry) {
return size() > maxSize;
}
}
} |
Simplify even more | public RPCServiceAddress resolve(String pattern) {
ServiceLRUCache cache = getPerThreadCache(); // per-thread cache, so no locking needed
RPCService service = cache.get(pattern);
if (service == null) { // cache miss: create and remember the service for this pattern
service = RPCService.create(net.getMirror(), pattern);
cache.put(pattern, service);
}
return service.resolve();
} | } | public RPCServiceAddress resolve(String pattern) {
return getPerThreadCache().computeIfAbsent(pattern, (key) -> RPCService.create(net.getMirror(), key)).resolve();
} | class RPCServicePool {
private final RPCNetwork net;
private final Map<Long, ServiceLRUCache> mapOfServiceCache;
private final int maxSize;
/**
* Create a new service pool for the given network.
*
* @param net The underlying RPC network.
* @param maxSize The max number of services to cache.
*/
public RPCServicePool(RPCNetwork net, int maxSize) {
this.net = net;
mapOfServiceCache = new CopyOnWriteHashMap<>();
this.maxSize = maxSize;
}
/**
* Returns the RPCServiceAddress that corresponds to a given pattern. This reuses the RPCService object for matching
* pattern so that load balancing is possible on the network level.
*
* @param pattern The pattern for the service we require.
* @return A service address for the given pattern.
*/
private ServiceLRUCache getPerThreadCache() {
return mapOfServiceCache.computeIfAbsent(Thread.currentThread().getId(), (key) -> new ServiceLRUCache(maxSize));
}
/**
* Returns the number of services available in the pool. This number will never exceed the limit given at
* construction time.
*
* @return The current size of this pool.
*/
public int getSize() {
return getPerThreadCache().size();
}
/**
* Returns whether or not there is a service available in the pool the corresponds to the given pattern.
*
* @param pattern The pattern to check for.
* @return True if a corresponding service is in the pool.
*/
public boolean hasService(String pattern) {
return getPerThreadCache().containsKey(pattern);
}
private static class ServiceLRUCache extends LinkedHashMap<String, RPCService> {
private final int maxSize;
ServiceLRUCache(int maxSize) {
super(16, 0.75f, true);
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<String, RPCService> entry) {
return size() > maxSize;
}
}
} | class RPCServicePool {
private final RPCNetwork net;
private final Map<Long, ServiceLRUCache> mapOfServiceCache;
private final int maxSize;
/**
* Create a new service pool for the given network.
*
* @param net The underlying RPC network.
* @param maxSize The max number of services to cache.
*/
public RPCServicePool(RPCNetwork net, int maxSize) {
this.net = net;
mapOfServiceCache = new CopyOnWriteHashMap<>();
this.maxSize = maxSize;
}
/**
* Returns the RPCServiceAddress that corresponds to a given pattern. This reuses the RPCService object for matching
* pattern so that load balancing is possible on the network level.
*
* @param pattern The pattern for the service we require.
* @return A service address for the given pattern.
*/
private ServiceLRUCache getPerThreadCache() {
return mapOfServiceCache.computeIfAbsent(Thread.currentThread().getId(), (key) -> new ServiceLRUCache(maxSize));
}
/**
* Returns the number of services available in the pool. This number will never exceed the limit given at
* construction time.
*
* @return The current size of this pool.
*/
public int getSize() {
return getPerThreadCache().size();
}
/**
* Returns whether or not there is a service available in the pool the corresponds to the given pattern.
*
* @param pattern The pattern to check for.
* @return True if a corresponding service is in the pool.
*/
public boolean hasService(String pattern) {
return getPerThreadCache().containsKey(pattern);
}
private static class ServiceLRUCache extends LinkedHashMap<String, RPCService> {
private final int maxSize;
ServiceLRUCache(int maxSize) {
super(16, 0.75f, true);
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<String, RPCService> entry) {
return size() > maxSize;
}
}
} |
This is the part that you _can_ still do. | public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
} | .build(); | public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
} | class ApplicationApiTest extends ControllerContainerTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
// Fresh container and deployment testers for every test; computing the version
// status up front gives version-dependent handlers data to serve.
tester = new ContainerTester(container, responseFiles);
deploymentTester = new DeploymentTester(new ControllerTester(tester));
deploymentTester.controllerTester().computeVersionStatus();
}
@Test
public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered dev-us-east-1 for tenant1.application1.instance1\"}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/job/dev-us-east-1/diff/1", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("--- search-definitions/test.sd\n" +
"@@ -1,0 +1,1 @@\n" +
"+ search test { }\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"],\"dumpOptions\":{\"duration\":30}}"),
"{\"message\":\"Request created\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", GET)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"createdMillis\":" + tester.controller().clock().millis() + ",\"configId\":\"default/container.1\"" +
",\"artifacts\":[\"jvm-dump\"],\"dumpOptions\":{\"duration\":30}}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
"- <deployment version='1.0' >\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted tenant tenant1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).properties(Map.of("includeDeleted", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("tenant1-deleted.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).properties(Map.of("forget", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deleted tenant tenant1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).properties(Map.of("includeDeleted", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404);
}
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
tester.applications().lockApplicationOrThrow(id, application ->
tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
.withOwnershipIssueId(IssueId.from("321"))
.withOwner(User.from("owner-username"))));
}
@Test
public void testRotationOverride() {
// Deploys an instance with a global service id to two prod zones, then
// exercises the global-rotation status endpoint and the rotation override
// (PUT/GET/DELETE), both as a tenant user and as a hosted-Vespa operator.
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Unknown application: status request is rejected.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
// Known application, but no deployment in the requested zone (us-central-1).
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
// With the zone in rotation, status and override endpoints respond normally.
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
// Tenant user sets an override: deployment is taken out of rotation, agent is 'tenant'.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
// Deleting the override puts the deployment back in rotation.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// Operator sets an override: agent is recorded as 'operator'.
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
@Test
public void multiple_endpoints() {
// An application with two global endpoints ('eu' and 'default'): querying
// global-rotation status without an 'endpointId' parameter is ambiguous and
// rejected; with the parameter, each endpoint reports its own status.
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
// Ambiguous: multiple rotations, no endpointId given.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "default"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
}
@Test
public void testMeteringResponses() {
    // Seed the metering mock with a current snapshot, this/last month totals and
    // a three-point snapshot history, then fetch the metering view for the app.
    MockMeteringClient meteringClient = tester.serviceRegistry().meteringService();
    var snapshotNow = new ResourceAllocation(1, 2, 3);
    var monthToDate = new ResourceAllocation(12, 24, 1000);
    var previousMonth = new ResourceAllocation(24, 48, 2000);
    ApplicationId appId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
    Map<ApplicationId, List<ResourceSnapshot>> history = Map.of(appId, List.of(
            new ResourceSnapshot(appId, 1, 2, 3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
            new ResourceSnapshot(appId, 1, 2, 3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
            new ResourceSnapshot(appId, 1, 2, 3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
    meteringClient.setMeteringData(new MeteringData(monthToDate, previousMonth, snapshotNow, history));
    tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance1-metering.json"));
}
// Fix: the method carried a duplicated @Test annotation, which does not
// compile (@Test is not a repeatable annotation) — keep exactly one.
@Test
public void testErrorResponses() {
    // Walks the tenant/application API through its error paths: authorization
    // failures, missing resources, duplicate creation, invalid names, and the
    // ordering rules around instance/tenant deletion.
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    // Modifying (PUT) a tenant that does not exist yet is denied.
    tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                          accessDenied,
                          403);
    // Reads on the non-existent tenant/application return 404.
    tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
                          404);
    // Creating the tenant succeeds once ...
    tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("tenant-without-applications.json"));
    // ... but a second tenant cannot reuse the same Athenz domain,
    tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
                          400);
    // ... re-creating the same tenant fails,
    tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
                          400);
    // ... invalid tenant names (underscores) are rejected,
    tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
                          400);
    // ... and the reserved 'hosted-vespa' name reports as already existing.
    tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
                          400);
    // Instance creation succeeds once, then reports a duplicate.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance-reference.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
                          400);
    // Package download: none submitted, unknown build number, malformed build number.
    ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
    configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
                                  .properties(Map.of("build", "42"))
                                  .userIdentity(HOSTED_VESPA_OPERATOR),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
                                  .properties(Map.of("build", "foobar"))
                                  .userIdentity(HOSTED_VESPA_OPERATOR),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
                          400);
    // Direct deployment through this API is not supported for this environment.
    MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
                                  .data(entity)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
    // Tenant cannot be deleted while it still has applications; delete the
    // instance first, then the tenant.
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
                          400);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
                          404);
    // Only operators may 'forget' a tenant.
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).properties(Map.of("forget", "true"))
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"FORBIDDEN\",\"message\":\"Only operators can forget a tenant\"}",
                          403);
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"message\":\"Deleted tenant tenant1\"}");
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                                  .userIdentity(USER_ID),
                          accessDenied,
                          403);
    // A legacy tenant stored with an underscore collides with its dashed form.
    tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
                                                               new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
    tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
                          400);
}
@Test
public void testAuthorization() {
// Checks authentication/authorization on tenant and application mutations:
// unauthenticated requests are 401, non-admin users are 403, and the Athenz
// domain admin can create, modify and delete resources.
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No user identity at all: 401 Not authenticated.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
// Listing tenants requires no special role.
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Creating a tenant requires admin rights in the referenced Athenz domain.
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
// Instance creation: denied for the unauthorized user, allowed for the admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
// Direct prod deployment is denied even for the tenant admin user.
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
// Application deletion: denied for the unauthorized user, allowed for the admin.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
// Tenant modification: denied for the unauthorized user; the admin can
// switch the tenant to another domain they administer.
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
// Submission is rejected when the Athenz identity in deployment.xml lives in
// a different domain than the tenant, or when launch of the service is not
// allowed; it succeeds once both conditions hold.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
// Wrong domain: 'another.domain' does not match the tenant domain 'domain1'.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
// Right domain, but launch of domain1.service not yet allowed.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
// After allowing launch, submission succeeds.
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
// A dev deployment with an Athenz identity is denied until the deploying
// user is made admin of the tenant's Athenz domain.
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage);
// 'new-user' is not a domain admin yet: access denied.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
// Grant domain admin to the user, then the same deployment starts.
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
@Test
public void developers_can_deploy_when_privileged() {
// A developer in a sandbox tenant can dev-deploy a package with an Athenz
// identity from another domain only if granted launch rights (via policy)
// or tenant-admin membership in that domain.
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage);
// No launch rights in domain1 yet: rejected with guidance to contact the domain admin.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
// Grant an explicit launch policy for this developer: deployment starts.
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
// A second developer gets access via tenant-admin membership instead.
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
// Raw zip upload works; application/gzip is not an accepted content type.
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
@Test
public void applicationWithRoutingPolicy() {
// Deploys to a zone supporting both exclusive and shared routing, adds an
// inactive routing policy, and checks the instance/deployment views with and
// without legacy endpoints, plus the effect of hiding shared endpoints.
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
// includeLegacyEndpoints=true adds legacy endpoints to the deployment view.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
// With the hide-shared-routing flag set, shared endpoints are omitted.
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} | class ApplicationApiTest extends ControllerContainerTest {
// Directory holding the expected-response JSON files used by ContainerTester.
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// EC public key used where tests need a PEM-encoded key; quoted variant is
// the same key with newlines escaped for embedding in JSON payloads.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Canonical 403 body asserted by the authorization tests.
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
// Application package for the 'default' instance: three prod regions, a
// global service id, and a mon-fri 0-8 UTC change-block window.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Same package shape but for instance 'instance1'.
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Fixed identities and tokens shared by the tests below.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test fixtures, rebuilt in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
    // Fresh testers per test; the version status must be computed before any
    // deployment-related request can be handled.
    this.tester = new ContainerTester(container, responseFiles);
    var controllerTester = new ControllerTester(this.tester);
    this.deploymentTester = new DeploymentTester(controllerTester);
    this.deploymentTester.controllerTester().computeVersionStatus();
}
@Test
public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered dev-us-east-1 for tenant1.application1.instance1\"}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/job/dev-us-east-1/diff/1", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("--- search-definitions/test.sd\n" +
"@@ -1,0 +1,1 @@\n" +
"+ search test { }\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"],\"dumpOptions\":{\"duration\":30}}"),
"{\"message\":\"Request created\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", GET)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"createdMillis\":" + tester.controller().clock().millis() + ",\"configId\":\"default/container.1\"" +
",\"artifacts\":[\"jvm-dump\"],\"dumpOptions\":{\"duration\":30}}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
"- <deployment version='1.0' >\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted tenant tenant1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).properties(Map.of("includeDeleted", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("tenant1-deleted.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).properties(Map.of("forget", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deleted tenant tenant1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).properties(Map.of("includeDeleted", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404);
}
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
tester.applications().lockApplicationOrThrow(id, application ->
tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
.withOwnershipIssueId(IssueId.from("321"))
.withOwner(User.from("owner-username"))));
}
@Test
public void testRotationOverride() {
    // Exercises the global-rotation status endpoint and its override (PUT/DELETE)
    // for an application with a global service id deployed to two prod zones.
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    var westZone = ZoneId.from("prod", "us-west-1");
    var eastZone = ZoneId.from("prod", "us-east-3");
    var applicationPackage = new ApplicationPackageBuilder()
            .instances("instance1")
            .globalServiceId("foo")
            .region(westZone.region())
            .region(eastZone.region())
            .build();
    var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
    app.submit(applicationPackage).deploy();
    // Unknown application -> 400.
    tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
                          400);
    // Known application but zone without a deployment -> 404, for both GET and override PUT.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
                                  .userIdentity(USER_ID)
                                  .data("{\"reason\":\"unit-test\"}"),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
                          404);
    // With the zone in rotation, status and override state can be read.
    setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                                  .userIdentity(USER_ID),
                          new File("global-rotation.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
                                  .userIdentity(USER_ID),
                          new File("global-rotation-get.json"));
    // A tenant user can set an override (takes the zone out of rotation) and remove it again.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
                                  .userIdentity(USER_ID)
                                  .data("{\"reason\":\"unit-test\"}"),
                          new File("global-rotation-put.json"));
    assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
                                  .userIdentity(USER_ID)
                                  .data("{\"reason\":\"unit-test\"}"),
                          new File("global-rotation-delete.json"));
    assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
    // An operator can also set an override; the recorded agent is then 'operator'.
    addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
                                  .userIdentity(HOSTED_VESPA_OPERATOR)
                                  .data("{\"reason\":\"unit-test\"}"),
                          new File("global-rotation-put.json"));
    assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
@Test
public void multiple_endpoints() {
    // An application with two declared endpoints (two rotations): querying
    // global-rotation status then requires the 'endpointId' query parameter.
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .instances("instance1")
            .region("us-west-1")
            .region("us-east-3")
            .region("eu-west-1")
            .endpoint("eu", "default", "eu-west-1")
            .endpoint("default", "default", "us-west-1", "us-east-3")
            .build();
    var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
    app.submit(applicationPackage).deploy();
    setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
    setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
    setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
    // Without endpointId the request is ambiguous -> 400.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
                          400);
    // With an explicit endpointId, status is returned per endpoint.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                                  .properties(Map.of("endpointId", "default"))
                                  .userIdentity(USER_ID),
                          "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                          200);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                                  .properties(Map.of("endpointId", "eu"))
                                  .userIdentity(USER_ID),
                          "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                          200);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
                                  .properties(Map.of("endpointId", "eu"))
                                  .userIdentity(USER_ID),
                          "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                          200);
}
@Test
public void testDeployWithApplicationPackage() {
    // Deploying a system application (hosted-vespa tenant) is rejected while
    // the system itself is upgrading, and allowed once the upgrade completes.
    addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
    MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty());
    tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
                                  .data(noAppEntity)
                                  .userIdentity(HOSTED_VESPA_OPERATOR),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
                          400);
    // Bring the system up to the controller's own version, completing the upgrade.
    deploymentTester.controllerTester()
                    .upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get()
                                                   .versionNumber());
    tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
                                  .data(noAppEntity)
                                  .userIdentity(HOSTED_VESPA_OPERATOR),
                          new File("deploy-result.json"));
}
@Test
public void testMeteringResponses() {
    // Seed the mock metering service with canned allocation data and verify that
    // the metering endpoint renders it, even for an application id that does not exist.
    var metering = tester.serviceRegistry().meteringService();
    var application = ApplicationId.from("doesnotexist", "doesnotexist", "default");
    var history = Map.of(application, List.of(
            new ResourceSnapshot(application, 1, 2, 3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
            new ResourceSnapshot(application, 1, 2, 3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
            new ResourceSnapshot(application, 1, 2, 3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
    var current = new ResourceAllocation(1, 2, 3);
    var monthToDate = new ResourceAllocation(12, 24, 1000);
    var previousMonth = new ResourceAllocation(24, 48, 2000);
    metering.setMeteringData(new MeteringData(monthToDate, previousMonth, current, history));
    tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance1-metering.json"));
}
// Fix: the method was annotated with @Test twice; @Test is not @Repeatable, so the
// duplicate annotation is a compile error. One occurrence removed.
@Test
public void testErrorResponses() {
    // Walks through the main error responses of the application/v4 API:
    // missing resources, duplicate creation, invalid names and forbidden operations.
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    // PUT on a non-existent tenant is denied.
    tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                          accessDenied,
                          403);
    // GETs on resources that do not exist yet -> 404.
    tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
                          404);
    // Creating the tenant succeeds once ...
    tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("tenant-without-applications.json"));
    // ... but the Athenz domain can only back one tenant, and names must be unique and valid.
    tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
                          400);
    tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
                          400);
    tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
                          400);
    tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
                          400);
    // Instance creation succeeds once, then conflicts.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance-reference.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
                          400);
    // Package retrieval errors: nothing submitted, unknown build, non-numeric build.
    ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
    configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
                                  .properties(Map.of("build", "42"))
                                  .userIdentity(HOSTED_VESPA_OPERATOR),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
                          404);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
                                  .properties(Map.of("build", "foobar"))
                                  .userIdentity(HOSTED_VESPA_OPERATOR),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
                          400);
    // Deploying a non-dev instance through this API is not supported.
    MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
                                  .data(entity)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
    // Tenant deletion: blocked while applications exist, then allowed; double delete -> 404.
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
                          400);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                                  .userIdentity(USER_ID),
                          "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
                          404);
    // 'forget' is an operator-only capability.
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).properties(Map.of("forget", "true"))
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"FORBIDDEN\",\"message\":\"Only operators can forget a tenant\"}",
                          403);
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"message\":\"Deleted tenant tenant1\"}");
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                                  .userIdentity(USER_ID),
                          accessDenied,
                          403);
    // A pre-existing tenant stored with an underscore name conflicts with the dashed
    // form on creation — presumably due to name normalization; confirmed by the 400 below.
    tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
                                                               new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
    tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
                          400);
}
@Test
public void testAuthorization() {
    // Verifies access control across tenant/application CRUD and deploy:
    // unauthenticated requests, users outside the Athenz domain, and domain admins.
    UserId authorizedUser = USER_ID;
    UserId unauthorizedUser = new UserId("othertenant");
    // No identity at all -> 401.
    tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                          "{\n \"message\" : \"Not authenticated\"\n}",
                          401);
    // Listing tenants is allowed for any authenticated user.
    tester.assertResponse(request("/application/v4/tenant/", GET)
                                  .userIdentity(USER_ID)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                          "[]",
                          200);
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    // Only an admin of the Athenz domain may create a tenant backed by it.
    tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                                  .userIdentity(unauthorizedUser),
                          "{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
                          403);
    tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .userIdentity(authorizedUser)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("tenant-without-applications.json"),
                          200);
    // Instance creation follows the same admin requirement.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                                  .userIdentity(unauthorizedUser)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          accessDenied,
                          403);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                                  .userIdentity(authorizedUser)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance-reference.json"),
                          200);
    // Direct prod deploy is not permitted for plain users.
    MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
                                  .data(entity)
                                  .userIdentity(USER_ID),
                          accessDenied,
                          403);
    // Application deletion requires the domain admin as well.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
                                  .userIdentity(unauthorizedUser),
                          accessDenied,
                          403);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
                                  .userIdentity(authorizedUser)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance-reference-default.json"),
                          200);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
                                  .userIdentity(authorizedUser)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          "{\"message\":\"Deleted application tenant1.application1\"}",
                          200);
    // Updating the tenant's backing domain: only the admin may do it.
    tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                                  .userIdentity(unauthorizedUser),
                          accessDenied,
                          403);
    createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
    tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                                  .data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
                                  .userIdentity(authorizedUser)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("tenant1.json"),
                          200);
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                                  .userIdentity(unauthorizedUser),
                          accessDenied,
                          403);
}
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
    // Submission is rejected when the Athenz identity in deployment.xml belongs
    // to a different domain than the tenant, or when launch is not allowed.
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .upgradePolicy("default")
            .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
            .region("us-west-1")
            .build();
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
    deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
    var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
    ScrewdriverId screwdriverId = new ScrewdriverId("123");
    addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
    allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
    // Identity domain 'another.domain' does not match tenant domain 'domain1' -> 400.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
                                  .data(createApplicationSubmissionData(applicationPackage, 123))
                                  .screwdriverIdentity(screwdriverId),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
                          400);
    // Correct domain, but launch of the service is not yet allowed -> 400.
    applicationPackage = new ApplicationPackageBuilder()
            .upgradePolicy("default")
            .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
            .region("us-west-1")
            .build();
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
                                  .data(createApplicationSubmissionData(applicationPackage, 123))
                                  .screwdriverIdentity(screwdriverId),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
                          400);
    // After allowing launch, the submission succeeds.
    allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
                                  .data(createApplicationSubmissionData(applicationPackage, 123))
                                  .screwdriverIdentity(screwdriverId),
                          "{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
    // A dev (personal) deployment using an Athenz identity requires the deploying
    // user to be an admin of that Athenz domain.
    UserId tenantAdmin = new UserId("tenant-admin");
    UserId userId = new UserId("new-user");
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
    allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
            .build();
    createTenantAndApplication();
    MultiPartStreamer entity = createApplicationDeployData(applicationPackage);
    // 'new-user' is not yet a domain admin -> 403.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
                                  .data(entity)
                                  .userIdentity(userId),
                          accessDenied,
                          403);
    // Make the user an admin of the tenant's Athenz domain; the deploy then starts.
    tester.athenzClientFactory().getSetup()
            .domains.get(ATHENZ_TENANT_DOMAIN)
                    .admin(HostedAthenzIdentities.from(userId));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
                                  .data(entity)
                                  .userIdentity(userId),
                          "{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
@Test
public void developers_can_deploy_when_privileged() {
    // A developer in a sandbox tenant may deploy with another domain's Athenz
    // identity only when explicitly granted launch, or when tenant admin.
    UserId tenantAdmin = new UserId("tenant-admin");
    createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
    allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
    UserId developer = new UserId("developer");
    AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
    createAthenzDomainWithAdmin(sandboxDomain, developer);
    AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
                                                       sandboxDomain,
                                                       new Property("vespa"),
                                                       Optional.empty());
    AthenzCredentials credentials = new AthenzCredentials(
            new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
    tester.controller().tenants().create(tenantSpec, credentials);
    tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
            .build();
    MultiPartStreamer entity = createApplicationDeployData(applicationPackage);
    // Without a launch grant on domain1.service, deploy is rejected.
    tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                                  .data(entity)
                                  .userIdentity(developer),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
                          400);
    // Grant the developer a launch policy; the deploy then starts.
    AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
    domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
    tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                                  .data(entity)
                                  .userIdentity(developer),
                          "{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
                          200);
    // A tenant admin of both domains may deploy without an individual grant.
    UserId developer2 = new UserId("developer2");
    tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
    tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
    tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                                  .data(entity)
                                  .userIdentity(developer2),
                          "{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
                          200);
    // Raw zip payloads are accepted; other content types are rejected.
    tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                                  .data(applicationPackageInstance1.zippedContent())
                                  .contentType("application/zip")
                                  .userIdentity(developer2),
                          "{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
    tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                                  .data(applicationPackageInstance1.zippedContent())
                                  .contentType("application/gzip")
                                  .userIdentity(developer2),
                          "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
@Test
public void applicationWithRoutingPolicy() {
    // Verifies rendering of routing policies in instance and deployment responses,
    // including legacy endpoints and the flag that hides shared routing endpoints.
    var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
    var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
    deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
                                                                        List.of(RoutingMethod.exclusive, RoutingMethod.shared));
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
            .compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
            .instances("instance1")
            .region(zone.region().value())
            .build();
    app.submit(applicationPackage).deploy();
    app.addInactiveRoutingPolicy(zone);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
                                  .userIdentity(USER_ID),
                          new File("instance-with-routing-policy.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                                  .userIdentity(USER_ID),
                          new File("deployment-with-routing-policy.json"));
    // Legacy endpoints are included only when explicitly requested.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                                  .userIdentity(USER_ID)
                                  .properties(Map.of("includeLegacyEndpoints", "true")),
                          new File("deployment-with-routing-policy-legacy.json"));
    // With the hide-shared-routing flag set, shared endpoints are omitted.
    ((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                                  .userIdentity(USER_ID),
                          new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} |
Fixed | public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
} | .build(); | public void testRemovingAllDeployments() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 432L);
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
tester.controller().jobController().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage);
assertEquals(Set.of(ZoneId.from("prod.us-west-1"), ZoneId.from("prod.us-east-3"), ZoneId.from("prod.eu-west-1"), ZoneId.from("dev.us-east-1")),
app.instance().deployments().keySet());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"All deployments removed\"}");
assertEquals(Set.of(ZoneId.from("dev.us-east-1")), app.instance().deployments().keySet());
} | class ApplicationApiTest extends ControllerContainerTest {
// Directory holding the canned JSON files compared against responses via assertResponse(..., File).
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// Sample EC public key in PEM form, used when exercising the application/instance key endpoints.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
// Same key with newlines escaped, for embedding inside expected JSON response strings.
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Expected response body for requests that should be rejected with 403.
private static final String accessDenied = "{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}";
// Canned application package for the 'default' instance: three prod regions,
// a global service id, and a weekday morning block-change window (UTC).
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Same package shape as above, but targeting instance 'instance1'.
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
// Athenz domains and identities used to authenticate test requests.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
// Okta identity/access tokens attached to mutating requests; the access token is a
// hard-coded sample JWT (not a real credential).
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Per-test fixtures, initialized in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
    // Fresh container and deployment testers for every test; the deployment tester
    // wraps a controller tester built on the same container tester.
    var containerTester = new ContainerTester(container, responseFiles);
    var controllerTester = new ControllerTester(containerTester);
    tester = containerTester;
    deploymentTester = new DeploymentTester(controllerTester);
    // Compute the initial version status so version-dependent endpoints have data.
    deploymentTester.controllerTester().computeVersionStatus();
}
@Test
public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered dev-us-east-1 for tenant1.application1.instance1\"}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/job/dev-us-east-1/diff/1", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("--- search-definitions/test.sd\n" +
"@@ -1,0 +1,1 @@\n" +
"+ search test { }\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"],\"dumpOptions\":{\"duration\":30}}"),
"{\"message\":\"Request created\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", GET)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"createdMillis\":" + tester.controller().clock().millis() + ",\"configId\":\"default/container.1\"" +
",\"artifacts\":[\"jvm-dump\"],\"dumpOptions\":{\"duration\":30}}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
"- <deployment version='1.0' >\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted tenant tenant1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).properties(Map.of("includeDeleted", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("tenant1-deleted.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).properties(Map.of("forget", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deleted tenant tenant1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).properties(Map.of("includeDeleted", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404);
}
// Attaches fixed deployment/ownership issue ids and an owner to the given application,
// mutating it under the application lock so the change is stored atomically.
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, locked -> {
        var updated = locked.withDeploymentIssueId(IssueId.from("123"))
                            .withOwnershipIssueId(IssueId.from("321"))
                            .withOwner(User.from("owner-username"));
        tester.controller().applications().store(updated);
    });
}
// Verifies the global-rotation status and override endpoints: requests against unknown
// applications or undeployed zones are rejected; once a zone is in rotation its status can
// be read; and both tenants and operators can set/clear the routing override, with the
// acting agent (tenant vs. operator) recorded on the deployment.
@Test
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region(westZone.region())
.region(eastZone.region())
.build();
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Unknown application: 400
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
// Known application, but no deployment in the requested zone: 404 for both GET and override PUT
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
404);
// Put the deployed zone in rotation, then read status and the current override
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
// Tenant sets the override (takes the zone out of rotation), recorded with agent=tenant
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
// Tenant clears the override, zone goes back in rotation
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// Operator sets the override: recorded with agent=operator
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
// Verifies global-rotation status for an application declaring multiple endpoints:
// when more than one rotation exists, the 'endpointId' query parameter is mandatory,
// and each endpoint's status can be queried individually per zone.
@Test
public void multiple_endpoints() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
// Without endpointId the request is ambiguous: 400
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
// Status per endpoint and zone
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "default"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
.properties(Map.of("endpointId", "eu"))
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
}
// Seeds the mock metering service with a fixed current snapshot, month-to-date and
// last-month allocations, and a three-entry snapshot history, then verifies the
// metering endpoint renders them as instance1-metering.json.
@Test
public void testMeteringResponses() {
    var id = ApplicationId.from("doesnotexist", "doesnotexist", "default");
    var current = new ResourceAllocation(1, 2, 3);
    var monthToDate = new ResourceAllocation(12, 24, 1000);
    var previousMonth = new ResourceAllocation(24, 48, 2000);
    // Identical snapshots at doubling timestamps
    List<ResourceSnapshot> history = List.of(
            new ResourceSnapshot(id, 1, 2, 3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
            new ResourceSnapshot(id, 1, 2, 3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
            new ResourceSnapshot(id, 1, 2, 3, Instant.ofEpochMilli(492), ZoneId.defaultId()));
    MockMeteringClient metering = tester.serviceRegistry().meteringService();
    metering.setMeteringData(new MeteringData(monthToDate, previousMonth, current, Map.of(id, history)));
    tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                                  .userIdentity(USER_ID)
                                  .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                          new File("instance1-metering.json"));
}
// Exercises error handling across the application/v4 API: lookups of non-existent
// tenants/applications, tenant-creation validation (duplicate Athenz domain, duplicate or
// reserved or malformed tenant names), duplicate instance creation, missing application
// packages, unsupported deploy paths, and delete/forget authorization rules.
// Fix: a duplicated @Test annotation was removed — @Test is not a repeatable annotation,
// so annotating the method twice does not compile.
@Test
public void testErrorResponses() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// PUT (modification) of a tenant that does not exist yet is denied
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
accessDenied,
403);
// GETs against non-existent tenant/application: 404
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
// Tenant creation succeeds once ...
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
// ... but the Athenz domain cannot be reused by another tenant
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
// ... and the same tenant cannot be created twice
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
// Tenant name validation: underscores are rejected
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
// Reserved tenant name
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
// Instance creation succeeds once, then conflicts
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
// NOTE(review): the queued prepare exception below is not obviously consumed by any
// of the following requests in this test — confirm it is intentional.
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
// Package retrieval: nothing submitted, unknown build, malformed build number
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "foobar"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
400);
// Direct deployment through this API is not supported for this path
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// Tenant deletion is blocked while applications exist; allowed after cleanup
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
// Only operators may "forget" a tenant
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).properties(Map.of("forget", "true"))
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"Only operators can forget a tenant\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted tenant tenant1\"}");
// Deleting without Okta tokens is denied
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
accessDenied,
403);
// A legacy tenant name with underscores collides with its dash-normalized form
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
// Verifies authorization rules: unauthenticated requests are rejected with 401, users who
// are not admins of the Athenz domain cannot create/modify/delete tenants or applications,
// while the domain admin can perform the full lifecycle.
@Test
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity at all: 401
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n  \"message\" : \"Not authenticated\"\n}",
401);
// Listing tenants requires only authentication
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Tenant creation: rejected for non-admins of the Athenz domain, allowed for the admin
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"),
200);
// Instance creation: same admin requirement
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"),
200);
// Prod deployment through this path is never allowed for plain users
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
accessDenied,
403);
// Application deletion: admin only
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
// Tenant modification (change of Athenz domain): admin only
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
accessDenied,
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant1.json"),
200);
// Tenant deletion: admin only
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
accessDenied,
403);
}
// Verifies two submission-time constraints on the Athenz identity declared in
// deployment.xml: the domain must match the tenant's domain, and the controller's
// identity must be allowed to launch the declared service.
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
// Domain in deployment.xml ("another.domain") differs from the tenant domain ("domain1"): rejected
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.region("us-west-1")
.build();
// Correct domain, but launch of domain1.service not yet allowed: rejected
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
400);
// After granting launch permission, submission succeeds
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
// Verifies that a dev deployment of a package declaring an Athenz service identity is
// only allowed once the deploying user is an admin of that Athenz domain.
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage);
// new-user is not yet admin of domain1: deploy denied
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
accessDenied,
403);
// Make new-user a domain admin; deploy now succeeds
tester.athenzClientFactory().getSetup()
.domains.get(ATHENZ_TENANT_DOMAIN)
.admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(userId),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
// Verifies dev-deploy privileges across domains: a developer in their own sandbox domain
// cannot launch a service from another domain until granted a launch policy or made a
// tenant admin of that domain; also checks accepted/rejected Content-Types for the
// non-multipart deploy payload.
@Test
public void developers_can_deploy_when_privileged() {
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
// Create a sandbox tenant and application administered by the developer
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
sandboxDomain,
new Property("vespa"),
Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage);
// Developer may not launch domain1.service without an explicit policy
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
400);
// Grant the developer a launch policy in domain1: deploy succeeds
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
200);
// A second developer who is tenant admin in both domains may also deploy
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(entity)
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
200);
// Raw zip payload is accepted; application/gzip is rejected
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/zip")
.userIdentity(developer2),
"{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
.data(applicationPackageInstance1.zippedContent())
.contentType("application/gzip")
.userIdentity(developer2),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
// Verifies serialization of instances and deployments that have routing policies:
// default output, legacy-endpoint output, and output when the shared routing endpoint
// is hidden by feature flag.
@Test
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
// Zone supports both exclusive and shared routing
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
// Legacy endpoints included on request
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID)
.properties(Map.of("includeLegacyEndpoints", "true")),
new File("deployment-with-routing-policy-legacy.json"));
// With the flag set, the shared routing endpoint is omitted
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment-without-shared-endpoints.json"));
}
@Test
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
" \"applicationId\": \"tenant1:application1:instance1\",\n" +
" \"zone\": \"prod.us-west-1\",\n" +
" \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} | class ApplicationApiTest extends ControllerContainerTest {
// Directory holding the canned JSON files that response bodies are compared against.
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
// EC public key in PEM form, used to exercise the application/deploy key endpoints.
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
                                           "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
                                           "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
                                           "-----END PUBLIC KEY-----\n";
// Same key with newlines escaped, as it appears embedded inside a JSON string.
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
// Expected body of a 403 response from the API.
private static final String accessDenied = "{\n  \"code\" : 403,\n  \"message\" : \"Access denied\"\n}";
// Application package for the 'default' instance, deployed to three prod regions with a change-blocking window.
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
        .instances("default")
        .globalServiceId("foo")
        .region("us-central-1")
        .region("us-east-3")
        .region("us-west-1")
        .blockChange(false, true, "mon-fri", "0-8", "UTC")
        .build();
// Same deployment spec as above, but for an instance named 'instance1'.
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
        .instances("instance1")
        .globalServiceId("foo")
        .region("us-central-1")
        .region("us-east-3")
        .region("us-west-1")
        .blockChange(false, true, "mon-fri", "0-8", "UTC")
        .build();
// Athenz domains the test tenants are created under.
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
// Identities used to issue requests with different privilege levels.
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId OTHER_USER_ID = new UserId("otheruser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
// Okta tokens attached to requests that require them; the access token is a static dummy JWT.
private static final OktaIdentityToken OKTA_IT = new OktaIdentityToken("okta-it");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.he0ErCNloe4J7Id0Ry2SEDg09lKkZkfsRiGsdX_vgEg");
// Test harnesses, re-created for every test in before().
private ContainerTester tester;
private DeploymentTester deploymentTester;
@Before
public void before() {
    // Fresh harnesses per test: a container tester backed by the canned response
    // files, wrapped by a deployment tester sharing the same controller.
    tester = new ContainerTester(container, responseFiles);
    ControllerTester controllerTester = new ControllerTester(tester);
    deploymentTester = new DeploymentTester(controllerTester);
    // Populate the version status so deployments see a known system version.
    deploymentTester.controllerTester().computeVersionStatus();
}
@Test
public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.controller().tenants().updateLastLogin(TenantName.from("tenant2"),
List.of(LastLoginInfo.UserLevel.user, LastLoginInfo.UserLevel.administrator), Instant.ofEpochMilli(1234));
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID)
.properties(Map.of("activeInstances", "true")),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/fake-app/instance/", GET).userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Application 'fake-app' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("tenant-with-empty-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID)
.properties(Map.of("recursive", "true",
"production", "true")),
new File("application-without-instances.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
var app1 = deploymentTester.newDeploymentContext(id);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Direct deployments are only allowed to manually deployed environments.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/production-us-east-3/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deployment started in run 1 of production-us-east-3 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.productionUsEast3);
tester.controller().applications().deactivate(app1.instanceId(), ZoneId.from("prod", "us-east-3"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1/", POST)
.data(entity)
.userIdentity(USER_ID),
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered dev-us-east-1 for tenant1.application1.instance1\"}");
app1.runJob(JobType.devUsEast1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1.instance1.dev.us-east-1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/deploy/dev-us-east-1", POST)
.userIdentity(OTHER_USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1)),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/otheruser/environment/dev/region/us-east-1", DELETE)
.userIdentity(OTHER_USER_ID),
accessDenied,
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackageInstance1)),
new File("deployment-job-accepted-2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/job/dev-us-east-1/diff/1", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("--- search-definitions/test.sd\n" +
"@@ -1,0 +1,1 @@\n" +
"+ search test { }\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.myuser in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant1.application1.myuser\"}");
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
id.application());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
app1.runJob(JobType.systemTest).runJob(JobType.stagingTest).runJob(JobType.productionUsCentral1);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.allow(ValidationId.globalEndpointChange)
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("instance-reference-2.json"));
ApplicationId id2 = ApplicationId.from("tenant2", "application2", "instance1");
var app2 = deploymentTester.newDeploymentContext(id2);
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
id2.application());
deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
"{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
deploymentTester.triggerJobs();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"skipTests\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
app2.runJob(JobType.productionUsWest1);
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/job/production-us-west-1", POST)
.data("{\"reTrigger\":true}")
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant2.application2.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/instance1/environment/prod/region/us-west-1", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deactivated tenant2.application2.instance1 in prod.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/compile-version", GET)
.userIdentity(USER_ID),
"{\"compileVersion\":\"6.1.0\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted instance tenant2.application2.default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant2.application2\"}");
deploymentTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
deploymentTester.controllerTester().computeVersionStatus();
setDeploymentMaintainedInfo();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("instance.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(deploymentTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("instance1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/clusters", GET)
.userIdentity(USER_ID),
new File("application-clusters.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/prod/region/controller/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/", GET).userIdentity(USER_ID),
"{\"path\":\"/\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/instance/default/environment/dev/region/us-east-1/content/bar/file.json?query=param", GET).userIdentity(USER_ID),
"{\"path\":\"/bar/file.json\"}");
updateMetrics();
tester.assertJsonResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-east-1/instance/default/metrics", GET)
.userIdentity(USER_ID),
new File("proton-metrics.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.1-commit1' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for tenant1.application1.instance1 at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin?")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 resumed\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindex", POST)
.properties(Map.of("clusterId", "boo,moo",
"documentType", "foo,boo"))
.userIdentity(USER_ID),
"{\"message\":\"Requested reindexing of tenant1.application1.instance1 in prod.us-central-1, on clusters boo, moo, for types foo, boo\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", POST)
.userIdentity(USER_ID),
"{\"message\":\"Enabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Disabled reindexing of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/reindexing", GET)
.userIdentity(USER_ID),
"{\"enabled\":true,\"clusters\":[{\"name\":\"cluster\",\"pending\":[{\"type\":\"type\",\"requiredGeneration\":100}],\"ready\":[{\"type\":\"type\",\"readyAtMillis\":345,\"startedAtMillis\":456,\"endedAtMillis\":567,\"state\":\"failed\",\"message\":\"(#`д´)ノ\",\"progress\":0.1}]}]}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data("{\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"],\"dumpOptions\":{\"duration\":30}}"),
"{\"message\":\"Request created\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", GET)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"createdMillis\":" + tester.controller().clock().millis() + ",\"configId\":\"default/container.1\"" +
",\"artifacts\":[\"jvm-dump\"],\"dumpOptions\":{\"duration\":30}}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
addUserToHostedOperatorRole(HostedAthenzIdentities.from(SCREWDRIVER_ID));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-east-3\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.properties(Map.of("hostname", "node-1-tenant-host-prod.us-central-1"))
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}", 200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", POST)
.userIdentity(USER_ID),
"{\"message\":\"Suspended orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/suspend", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Resumed orchestration of tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/suspend", POST)
.userIdentity(USER_ID),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-east-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "default"),
JobType.productionUsCentral1,
Optional.empty(),
applicationPackageDefault);
tester.controller().jobController().deploy(ApplicationId.from("tenant1", "application1", "my-user"),
JobType.devUsEast1,
Optional.empty(),
applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/dev-us-east-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config-dev.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/my-user/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "default"),
ZoneId.from("prod", "us-central-1"));
tester.controller().applications().deactivate(ApplicationId.from("tenant1", "application1", "my-user"),
ZoneId.from("dev", "us-east-1"));
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString(),
response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
"- <deployment version='1.0' >\n")),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build2.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(packageWithService.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "1"))
.userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
assertEquals("attachment; filename=\"tenant1.application1-build1.zip\"", response.getHeaders().getFirst("Content-Disposition"));
assertArrayEquals(applicationPackageInstance1.zippedContent(), response.getBody());
},
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService, 123);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.3-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
.instances("instance1,instance2")
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.endpoint("default", "foo", "us-central-1", "us-west-1", "us-east-3")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
"{\"message\":\"Application package version: 1.0.4-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deployment", GET)
.userIdentity(USER_ID),
new File("deployment-overview.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 2 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
addNotifications(TenantName.from("tenant1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
new File("notifications-tenant1.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
.properties(Map.of("application", "app2")).userIdentity(USER_ID),
new File("notifications-tenant1-app2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted application tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"message\":\"Deleted tenant tenant1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404);
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).properties(Map.of("includeDeleted", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("tenant1-deleted.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}", 400);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).properties(Map.of("forget", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Deleted tenant tenant1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).properties(Map.of("includeDeleted", "true"))
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404);
}
// Attaches fixed deployment/ownership issue ids and an owner to the given application,
// persisting the updated instance through the controller's application store.
private void addIssues(DeploymentTester tester, TenantAndApplicationId id) {
    tester.applications().lockApplicationOrThrow(id, lockedApplication -> {
        var withIssues = lockedApplication.withDeploymentIssueId(IssueId.from("123"))
                                          .withOwnershipIssueId(IssueId.from("321"))
                                          .withOwner(User.from("owner-username"));
        tester.controller().applications().store(withIssues);
    });
}
// Exercises the global-rotation status and override endpoints:
// error cases for an unknown application and a missing deployment, then
// GET/PUT/DELETE of the rotation override as a tenant user, and finally
// an override set by a hosted-Vespa operator identity.
@Test
public void testRotationOverride() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
var westZone = ZoneId.from("prod", "us-west-1");
var eastZone = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
        .instances("instance1")
        .globalServiceId("foo")
        .region(westZone.region())
        .region(eastZone.region())
        .build();
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
app.submit(applicationPackage).deploy();
// Unknown application -> 400
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
                     400);
// Known application, but no deployment in the requested zone -> 404 (both for status and override)
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation", GET)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
                     404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/global-rotation/override", PUT)
                             .userIdentity(USER_ID)
                             .data("{\"reason\":\"unit-test\"}"),
                     "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-central-1\"}",
                     404);
// With a zone in rotation, status and override read/write succeed
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                             .userIdentity(USER_ID),
                     new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
                             .userIdentity(USER_ID),
                     new File("global-rotation-get.json"));
// Setting the override takes the deployment out of rotation, attributed to the tenant
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
                             .userIdentity(USER_ID)
                             .data("{\"reason\":\"unit-test\"}"),
                     new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.tenant);
// Deleting the override puts the deployment back in rotation
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
                             .userIdentity(USER_ID)
                             .data("{\"reason\":\"unit-test\"}"),
                     new File("global-rotation-delete.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.in, GlobalRouting.Agent.tenant);
// An operator may also set the override; the change is attributed to the operator agent
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
                             .userIdentity(HOSTED_VESPA_OPERATOR)
                             .data("{\"reason\":\"unit-test\"}"),
                     new File("global-rotation-put.json"));
assertGlobalRouting(app.deploymentIdIn(westZone), GlobalRouting.Status.out, GlobalRouting.Agent.operator);
}
// Verifies global-rotation status lookup for an application with multiple
// endpoints: the query must disambiguate with 'endpointId' when more than one
// rotation exists, and each endpoint id resolves independently per zone.
@Test
public void multiple_endpoints() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .instances("instance1")
        .region("us-west-1")
        .region("us-east-3")
        .region("eu-west-1")
        .endpoint("eu", "default", "eu-west-1")
        .endpoint("default", "default", "us-west-1", "us-east-3")
        .build();
var app = deploymentTester.newDeploymentContext("tenant1", "application1", "instance1");
app.submit(applicationPackage).deploy();
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
// Without 'endpointId' the request is ambiguous -> 400
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
                     400);
// Each endpoint id can be queried explicitly, in any zone it covers
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                             .properties(Map.of("endpointId", "default"))
                             .userIdentity(USER_ID),
                     "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                     200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
                             .properties(Map.of("endpointId", "eu"))
                             .userIdentity(USER_ID),
                     "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                     200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation", GET)
                             .properties(Map.of("endpointId", "eu"))
                             .userIdentity(USER_ID),
                     "{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
                     200);
}
// Verifies deployment of a system application (hosted-vespa routing) through the
// deploy endpoint: rejected while the system itself is upgrading, allowed once the
// system version matches the controller version.
@Test
public void testDeployWithApplicationPackage() {
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty());
// During a system upgrade, deploying system applications is refused -> 400
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
                             .data(noAppEntity)
                             .userIdentity(HOSTED_VESPA_OPERATOR),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
                     400);
// Completing the system upgrade to the controller's version makes the deploy succeed
deploymentTester.controllerTester()
        .upgradeSystem(deploymentTester.controller().readVersionStatus().controllerVersion().get()
                                       .versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
                             .data(noAppEntity)
                             .userIdentity(HOSTED_VESPA_OPERATOR),
                     new File("deploy-result.json"));
}
// Verifies the metering endpoint response shape: seeds the mock metering client
// with a current snapshot, this/last month allocations, and a per-application
// snapshot history, then checks the serialized response against a file fixture.
@Test
public void testMeteringResponses() {
MockMeteringClient mockMeteringClient = tester.serviceRegistry().meteringService();
ResourceAllocation currentSnapshot = new ResourceAllocation(1, 2, 3);
ResourceAllocation thisMonth = new ResourceAllocation(12, 24, 1000);
ResourceAllocation lastMonth = new ResourceAllocation(24, 48, 2000);
ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, List.of(
        new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(123), ZoneId.defaultId()),
        new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(246), ZoneId.defaultId()),
        new ResourceSnapshot(applicationId, 1, 2,3, Instant.ofEpochMilli(492), ZoneId.defaultId())));
mockMeteringClient.setMeteringData(new MeteringData(thisMonth, lastMonth, currentSnapshot, snapshotHistory));
tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
                             .userIdentity(USER_ID)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     new File("instance1-metering.json"));
}
// Walks the main error paths of the application/v4 API: missing tenants and
// applications (404), invalid or duplicate creations (400), package retrieval
// errors, unsupported deploys, and tenant/instance deletion constraints.
// Note: the duplicated @Test annotation was removed — JUnit 4's @Test is not
// @Repeatable, so annotating the method twice does not compile.
@Test
public void testErrorResponses() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// PUT on a non-existent tenant is denied before the 404 is reached
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                             .userIdentity(USER_ID)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                     accessDenied,
                     403);
// Lookups on things that do not exist yet -> 404
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
                     404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application", GET)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
                     404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
                     404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
                     404);
// Tenant creation: first succeeds, then domain conflicts, duplicates and invalid names -> 400
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                             .userIdentity(USER_ID)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
                             .userIdentity(USER_ID)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
                     400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                             .userIdentity(USER_ID)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
                     400);
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
                             .userIdentity(USER_ID)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
                     400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
                             .userIdentity(USER_ID)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
                     400);
// Instance creation: first succeeds, duplicate -> 400
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                             .userIdentity(USER_ID)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     new File("instance-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
                     400);
// Package retrieval errors: nothing submitted, unknown build, and a non-numeric build
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
                     "{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package has been submitted for 'tenant1.application1'\"}",
                     404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
                             .properties(Map.of("build", "42"))
                             .userIdentity(HOSTED_VESPA_OPERATOR),
                     "{\"error-code\":\"NOT_FOUND\",\"message\":\"No application package found for 'tenant1.application1' with build number 42\"}",
                     404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
                             .properties(Map.of("build", "foobar"))
                             .userIdentity(HOSTED_VESPA_OPERATOR),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid build number: For input string: \\\"foobar\\\"\"}",
                     400);
// Deploying through this API is not supported for tenant applications -> 400
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-east-1/instance/instance1/deploy", POST)
                             .data(entity)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of tenant1.application1.instance1 is not supported through this API\"}", 400);
// Tenant deletion: blocked while applications exist, instance deletion only works once,
// and 'forget' is restricted to operators
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                             .userIdentity(USER_ID)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
                     400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
                             .userIdentity(USER_ID)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     "{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                             .userIdentity(USER_ID),
                     "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
                     404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).properties(Map.of("forget", "true"))
                             .userIdentity(USER_ID)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     "{\"error-code\":\"FORBIDDEN\",\"message\":\"Only operators can forget a tenant\"}",
                     403);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                             .userIdentity(USER_ID)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     "{\"message\":\"Deleted tenant tenant1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                             .userIdentity(USER_ID),
                     accessDenied,
                     403);
// A tenant stored with an underscore in its name collides with the dashed form on creation
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
                                                           new Property("property1"), Optional.empty(), Optional.empty(), Instant.EPOCH, LastLoginInfo.EMPTY));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
                             .userIdentity(USER_ID)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
                     400);
}
// Verifies authentication and authorization rules across the tenant/application
// lifecycle: unauthenticated requests, unauthorized vs. domain-admin users, and
// that deploy/create/update/delete each enforce the right identity.
@Test
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
// No identity at all -> 401
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                     "{\n \"message\" : \"Not authenticated\"\n}",
                     401);
// Authenticated listing of tenants works even before any tenant exists
tester.assertResponse(request("/application/v4/tenant/", GET)
                             .userIdentity(USER_ID)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                     "[]",
                     200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
// Only an admin of the Athenz domain may create the tenant
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT)
                             .userIdentity(unauthorizedUser),
                     "{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
                     403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                             .userIdentity(authorizedUser)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     new File("tenant-without-applications.json"),
                     200);
// Instance creation follows the same authorization split
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                             .userIdentity(unauthorizedUser)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     accessDenied,
                     403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
                             .userIdentity(authorizedUser)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     new File("instance-reference.json"),
                     200);
// A user identity may not deploy directly to prod
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
                             .data(entity)
                             .userIdentity(USER_ID),
                     accessDenied,
                     403);
// Deleting the application requires an authorized user
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
                             .userIdentity(unauthorizedUser),
                     accessDenied,
                     403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
                             .userIdentity(authorizedUser)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     new File("instance-reference-default.json"),
                     200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
                             .userIdentity(authorizedUser)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     "{\"message\":\"Deleted application tenant1.application1\"}",
                     200);
// Updating and deleting the tenant also require authorization
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                             .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
                             .userIdentity(unauthorizedUser),
                     accessDenied,
                     403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                             .data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
                             .userIdentity(authorizedUser)
                             .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT),
                     new File("tenant1.json"),
                     200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
                             .userIdentity(unauthorizedUser),
                     accessDenied,
                     403);
}
// Verifies submission validation of the Athenz identity in deployment.xml:
// the service's domain must match the tenant's domain, and the deploying
// identity must be allowed to launch the service before submission succeeds.
@Test
public void athenz_service_must_be_allowed_to_launch_and_be_under_tenant_domain() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .upgradePolicy("default")
        .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
        .region("us-west-1")
        .build();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
deploymentTester.controllerTester().createTenant("tenant1", ATHENZ_TENANT_DOMAIN.getName(), 1234L);
var application = deploymentTester.newDeploymentContext("tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId("123");
addScrewdriverUserToDeployRole(screwdriverId, ATHENZ_TENANT_DOMAIN, application.instanceId().application());
// Service in a foreign domain is rejected even if launch is allowed there
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
                             .data(createApplicationSubmissionData(applicationPackage, 123))
                             .screwdriverIdentity(screwdriverId),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
                     400);
// Matching domain, but launch not yet allowed -> still rejected
applicationPackage = new ApplicationPackageBuilder()
        .upgradePolicy("default")
        .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
        .region("us-west-1")
        .build();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
                             .data(createApplicationSubmissionData(applicationPackage, 123))
                             .screwdriverIdentity(screwdriverId),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}",
                     400);
// Matching domain and launch allowed -> submission succeeds
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
                             .data(createApplicationSubmissionData(applicationPackage, 123))
                             .screwdriverIdentity(screwdriverId),
                     "{\"message\":\"Application package version: 1.0.1-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
}
// Verifies that a dev deployment using an Athenz service identity is denied for
// a user who is not admin of the tenant domain, and succeeds once that user is
// added as a domain admin.
@Test
public void personal_deployment_with_athenz_service_requires_user_is_admin() {
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
        .build();
createTenantAndApplication();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage);
// Not a domain admin yet -> 403
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
                             .data(entity)
                             .userIdentity(userId),
                     accessDenied,
                     403);
// Grant domain admin to the user; the same deploy now starts a dev run
tester.athenzClientFactory().getSetup()
        .domains.get(ATHENZ_TENANT_DOMAIN)
                .admin(HostedAthenzIdentities.from(userId));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/new-user/deploy/dev-us-east-1", POST)
                             .data(entity)
                             .userIdentity(userId),
                     "{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.new-user. This may take about 15 minutes the first time.\",\"run\":1}");
}
// Verifies developer dev-deployments under a sandbox tenant: a developer without
// a launch grant for the Athenz service is rejected with guidance, a launch
// policy makes the deploy succeed, tenant admins can deploy, raw zip uploads are
// accepted, and an unsupported content type is rejected.
@Test
public void developers_can_deploy_when_privileged() {
UserId tenantAdmin = new UserId("tenant-admin");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
allowLaunchOfService(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"));
UserId developer = new UserId("developer");
AthenzDomain sandboxDomain = new AthenzDomain("sandbox");
createAthenzDomainWithAdmin(sandboxDomain, developer);
AthenzTenantSpec tenantSpec = new AthenzTenantSpec(TenantName.from("sandbox"),
                                                   sandboxDomain,
                                                   new Property("vespa"),
                                                   Optional.empty());
AthenzCredentials credentials = new AthenzCredentials(
        new AthenzPrincipal(new AthenzUser(developer.id())), sandboxDomain, OKTA_IT, OKTA_AT);
tester.controller().tenants().create(tenantSpec, credentials);
tester.controller().applications().createApplication(TenantAndApplicationId.from("sandbox", "myapp"), credentials);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
        .build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage);
// Developer without a launch grant -> 400 with actionable message
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                             .data(entity)
                             .userIdentity(developer),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.developer is not allowed to launch service domain1.service. Please reach out to the domain admin.\"}",
                     400);
// Granting a launch policy to the developer lets the deploy through
AthenzDbMock.Domain domainMock = tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN);
domainMock.withPolicy("user." + developer.id(), "launch", "service.service");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                             .data(entity)
                             .userIdentity(developer),
                     "{\"message\":\"Deployment started in run 1 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":1}",
                     200);
// A tenant admin of both domains can also deploy
UserId developer2 = new UserId("developer2");
tester.athenzClientFactory().getSetup().getOrCreateDomain(sandboxDomain).tenantAdmin(new AthenzUser(developer2.id()));
tester.athenzClientFactory().getSetup().getOrCreateDomain(ATHENZ_TENANT_DOMAIN).tenantAdmin(new AthenzUser(developer2.id()));
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                             .data(entity)
                             .userIdentity(developer2),
                     "{\"message\":\"Deployment started in run 2 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":2}",
                     200);
// Raw application/zip upload is accepted; application/gzip is not
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                             .data(applicationPackageInstance1.zippedContent())
                             .contentType("application/zip")
                             .userIdentity(developer2),
                     "{\"message\":\"Deployment started in run 3 of dev-us-east-1 for sandbox.myapp. This may take about 15 minutes the first time.\",\"run\":3}");
tester.assertResponse(request("/application/v4/tenant/sandbox/application/myapp/instance/default/deploy/dev-us-east-1", POST)
                             .data(applicationPackageInstance1.zippedContent())
                             .contentType("application/gzip")
                             .userIdentity(developer2),
                     "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Expected a multipart or application/zip message, but got Content-Type: application/gzip\"}", 400);
}
// Verifies serialization of instance and deployment views for an application
// with routing policies in a zone supporting both exclusive and shared routing,
// including the legacy-endpoint view and the effect of hiding shared endpoints
// via feature flag.
@Test
public void applicationWithRoutingPolicy() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
                                                                    List.of(RoutingMethod.exclusive, RoutingMethod.shared));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
        .athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
        .compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
        .instances("instance1")
        .region(zone.region().value())
        .build();
app.submit(applicationPackage).deploy();
app.addInactiveRoutingPolicy(zone);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
                             .userIdentity(USER_ID),
                     new File("instance-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                             .userIdentity(USER_ID),
                     new File("deployment-with-routing-policy.json"));
// 'includeLegacyEndpoints' adds the legacy endpoint entries to the response
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                             .userIdentity(USER_ID)
                             .properties(Map.of("includeLegacyEndpoints", "true")),
                     new File("deployment-with-routing-policy-legacy.json"));
// With the hide-shared-routing flag set, shared endpoints are omitted
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
                             .userIdentity(USER_ID),
                     new File("deployment-without-shared-endpoints.json"));
}
@Test
// Exercises the support-access API: initially NOT_ALLOWED, then allowed by a user,
// then a grant registered by a hosted operator (which also triggers a deployment job).
public void support_access() {
var app = deploymentTester.newDeploymentContext(createTenantAndApplication());
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
.instances("instance1")
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
// Support access is disallowed by default.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
"{\"state\":{\"supportAccess\":\"NOT_ALLOWED\"},\"history\":[],\"grants\":[]}", 200
);
Instant now = tester.controller().clock().instant().truncatedTo(ChronoUnit.SECONDS);
// Allowing support access defaults to a 7-day window.
String allowedResponse = "{\"state\":{\"supportAccess\":\"ALLOWED\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"},\"history\":[{\"state\":\"allowed\",\"at\":\"" + serializeInstant(now)
+ "\",\"until\":\"" + serializeInstant(now.plus(7, ChronoUnit.DAYS))
+ "\",\"by\":\"user.myuser\"}],\"grants\":[]}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", POST)
.userIdentity(USER_ID),
allowedResponse, 200
);
// An operator registers a grant backed by a short-lived certificate.
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
String grantPayload= "{\n" +
"  \"applicationId\": \"tenant1:application1:instance1\",\n" +
"  \"zone\": \"prod.us-west-1\",\n" +
"  \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
"}";
tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
.data(grantPayload)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
// The grant is now visible in the access/support response.
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
"\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
app.assertRunning(JobType.productionUsWest1);
// NOTE(review): the statement below appears truncated in this extract — confirm the
// remainder of this test against the upstream source before relying on it.
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"} |
Use -4 for non hosted, but same approach. | public void getConfig(ContainerThreadpoolConfig.Builder builder) {
super.getConfig(builder);
builder.maxThreadExecutionTimeSeconds(190);
builder.keepAliveTime(5.0);
if (hasUserOptions()) return;
if (cluster.isHostedVespa()) {
builder.maxThreads(-2).minThreads(8).queueSize(-40);
} else {
builder.maxThreads(500).minThreads(500).queueSize(0);
}
} | builder.maxThreads(500).minThreads(500).queueSize(0); | public void getConfig(ContainerThreadpoolConfig.Builder builder) {
super.getConfig(builder);
builder.maxThreadExecutionTimeSeconds(190);
builder.keepAliveTime(5.0);
if (hasUserOptions()) return;
if (cluster.isHostedVespa()) {
builder.maxThreads(-2).minThreads(-2).queueSize(-40);
} else {
builder.maxThreads(-4).minThreads(-4).queueSize(-40);
}
} | class Threadpool extends ContainerThreadpool {
private final ApplicationContainerCluster cluster;
Threadpool(ApplicationContainerCluster cluster, UserOptions options) {
super("search-handler", options);
this.cluster = cluster;
}
@Override
} | class Threadpool extends ContainerThreadpool {
private final ApplicationContainerCluster cluster;
Threadpool(ApplicationContainerCluster cluster, UserOptions options) {
super("search-handler", options);
this.cluster = cluster;
}
@Override
} |
Use a named constant that gives a hint to purpose. | private int queueSize(ContainerThreadpoolConfig config, int maxThreads) {
return config.queueSize() >= 0 ? config.queueSize() : Math.max(650, Math.abs(config.queueSize()) * maxThreads);
} | return config.queueSize() >= 0 ? config.queueSize() : Math.max(650, Math.abs(config.queueSize()) * maxThreads); | private int queueSize(ContainerThreadpoolConfig config, int maxThreads) {
return config.queueSize() >= 0 ? config.queueSize() : Math.max(MIN_QUEUE_SIZE, Math.abs(config.queueSize()) * maxThreads);
} | class DefaultContainerThreadpool extends AbstractComponent implements AutoCloseable, ContainerThreadPool {
private static final Logger log = Logger.getLogger(DefaultContainerThreadpool.class.getName());
private final ExecutorServiceWrapper threadpool;
@Inject
public DefaultContainerThreadpool(ContainerThreadpoolConfig config, Metric metric) {
this(config, metric, new ProcessTerminator());
}
public DefaultContainerThreadpool(ContainerThreadpoolConfig config, Metric metric, ProcessTerminator processTerminator) {
this(config, metric, processTerminator, Runtime.getRuntime().availableProcessors());
}
DefaultContainerThreadpool(ContainerThreadpoolConfig config, Metric metric, ProcessTerminator processTerminator,
int cpus) {
String name = config.name();
int maxThreads = maxThreads(config, cpus);
int minThreads = minThreads(config, maxThreads, cpus);
int queueSize = queueSize(config, maxThreads);
log.info(String.format("Threadpool '%s': min=%d, max=%d, queue=%d", name, minThreads, maxThreads, queueSize));
ThreadPoolMetric threadPoolMetric = new ThreadPoolMetric(metric, name);
WorkerCompletionTimingThreadPoolExecutor executor =
new WorkerCompletionTimingThreadPoolExecutor(minThreads, maxThreads,
(int)config.keepAliveTime() * 1000, TimeUnit.MILLISECONDS,
createQueue(queueSize),
ThreadFactoryFactory.getThreadFactory(name),
threadPoolMetric);
executor.prestartAllCoreThreads();
threadpool = new ExecutorServiceWrapper(
executor, threadPoolMetric, processTerminator, config.maxThreadExecutionTimeSeconds() * 1000L,
name, queueSize);
}
@Override public Executor executor() { return threadpool; }
@Override public void close() { closeInternal(); }
@Override public void deconstruct() { closeInternal(); super.deconstruct(); }
/**
* Shutdown the thread pool, give a grace period of 1 second before forcibly
* shutting down all worker threads.
*/
private void closeInternal() {
boolean terminated;
threadpool.shutdown();
try {
terminated = threadpool.awaitTermination(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
if (!terminated) {
threadpool.shutdownNow();
}
}
private static BlockingQueue<Runnable> createQueue(int size) {
return size == 0 ? new SynchronousQueue<>(false) : new ArrayBlockingQueue<>(size);
}
private static int maxThreads(ContainerThreadpoolConfig config, int cpus) {
if (config.maxThreads() > 0) return config.maxThreads();
else if (config.maxThreads() == 0) return 4 * cpus;
else return Math.abs(config.maxThreads()) * cpus;
}
private static int minThreads(ContainerThreadpoolConfig config, int max, int cpus) {
int threads;
if (config.minThreads() > 0) threads = config.minThreads();
else if (config.minThreads() == 0) threads = 4 * cpus;
else threads = Math.abs(config.minThreads()) * cpus;
return Math.min(threads, max);
}
} | class DefaultContainerThreadpool extends AbstractComponent implements AutoCloseable, ContainerThreadPool {
private static final Logger log = Logger.getLogger(DefaultContainerThreadpool.class.getName());
private static final int MIN_QUEUE_SIZE = 650;
private static final int MIN_THREADS_WHEN_SCALE_FACTOR = 8;
private final ExecutorServiceWrapper threadpool;
@Inject
public DefaultContainerThreadpool(ContainerThreadpoolConfig config, Metric metric) {
this(config, metric, new ProcessTerminator());
}
public DefaultContainerThreadpool(ContainerThreadpoolConfig config, Metric metric, ProcessTerminator processTerminator) {
this(config, metric, processTerminator, Runtime.getRuntime().availableProcessors());
}
DefaultContainerThreadpool(ContainerThreadpoolConfig config, Metric metric, ProcessTerminator processTerminator,
int cpus) {
String name = config.name();
int maxThreads = maxThreads(config, cpus);
int minThreads = minThreads(config, maxThreads, cpus);
int queueSize = queueSize(config, maxThreads);
log.info(String.format("Threadpool '%s': min=%d, max=%d, queue=%d", name, minThreads, maxThreads, queueSize));
ThreadPoolMetric threadPoolMetric = new ThreadPoolMetric(metric, name);
WorkerCompletionTimingThreadPoolExecutor executor =
new WorkerCompletionTimingThreadPoolExecutor(minThreads, maxThreads,
(int)config.keepAliveTime() * 1000, TimeUnit.MILLISECONDS,
createQueue(queueSize),
ThreadFactoryFactory.getThreadFactory(name),
threadPoolMetric);
executor.prestartAllCoreThreads();
threadpool = new ExecutorServiceWrapper(
executor, threadPoolMetric, processTerminator, config.maxThreadExecutionTimeSeconds() * 1000L,
name, queueSize);
}
@Override public Executor executor() { return threadpool; }
@Override public void close() { closeInternal(); }
@Override public void deconstruct() { closeInternal(); super.deconstruct(); }
/**
* Shutdown the thread pool, give a grace period of 1 second before forcibly
* shutting down all worker threads.
*/
private void closeInternal() {
boolean terminated;
threadpool.shutdown();
try {
terminated = threadpool.awaitTermination(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
if (!terminated) {
threadpool.shutdownNow();
}
}
private static BlockingQueue<Runnable> createQueue(int size) {
return size == 0 ? new SynchronousQueue<>(false) : new ArrayBlockingQueue<>(size);
}
private static int maxThreads(ContainerThreadpoolConfig config, int cpus) {
if (config.maxThreads() > 0) return config.maxThreads();
else if (config.maxThreads() == 0) return 4 * cpus;
else return Math.max(MIN_THREADS_WHEN_SCALE_FACTOR, Math.abs(config.maxThreads()) * cpus);
}
private static int minThreads(ContainerThreadpoolConfig config, int max, int cpus) {
int threads;
if (config.minThreads() > 0) threads = config.minThreads();
else if (config.minThreads() == 0) threads = 4 * cpus;
else threads = Math.max(MIN_THREADS_WHEN_SCALE_FACTOR, Math.abs(config.minThreads()) * cpus);
return Math.min(threads, max);
}
} |
Already covered by `santizeFileName`? | private void startShutdownDeadlineExecutor() {
shutdownDeadlineExecutor = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("Shutdown deadline timer"));
shutdownDeadlineExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
long delayMillis = (long)(shudownTimeoutS.get() * 1000.0);
shutdownDeadlineExecutor.schedule(() -> {
if (dumpHeapOnShutdownTimeout.get()) {
String saneConfigId = configId.replaceAll(SANITIZE_FILENAME, "_");
String heapDumpName = Defaults.getDefaults().underVespaHome("var/crash/java_pid.") + santizeFileName(saneConfigId) + "." + ProcessHandle.current().pid() + ".hprof";
com.yahoo.protect.Process.dumpHeap(heapDumpName, true);
}
com.yahoo.protect.Process.logAndDie(
"Timed out waiting for application shutdown. Please check that all your request handlers " +
"drain their request content channels.", true);
}, delayMillis, TimeUnit.MILLISECONDS);
} | String saneConfigId = configId.replaceAll(SANITIZE_FILENAME, "_"); | private void startShutdownDeadlineExecutor() {
shutdownDeadlineExecutor = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("Shutdown deadline timer"));
shutdownDeadlineExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
long delayMillis = (long)(shudownTimeoutS.get() * 1000.0);
shutdownDeadlineExecutor.schedule(() -> {
if (dumpHeapOnShutdownTimeout.get()) {
String heapDumpName = Defaults.getDefaults().underVespaHome("var/crash/java_pid.") + santizeFileName(configId) + "." + ProcessHandle.current().pid() + ".hprof";
com.yahoo.protect.Process.dumpHeap(heapDumpName, true);
}
com.yahoo.protect.Process.logAndDie(
"Timed out waiting for application shutdown. Please check that all your request handlers " +
"drain their request content channels.", true);
}, delayMillis, TimeUnit.MILLISECONDS);
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} |
Not really needed but yes you're right, it's "more correct". We need to assign it the host resource then. | private StorageNode buildSingleNode(DeployState deployState, ContentCluster parent) {
int distributionKey = 0;
StorageNode sNode = new StorageNode(deployState.getProperties(), parent.getStorageCluster(), 1.0, distributionKey , false);
sNode.setHostResource(parent.hostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC));
sNode.initService(deployLogger);
PersistenceEngine provider = parent.getPersistence().create(deployState, sNode, storageGroup, null);
new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider);
return sNode;
} | sNode.initService(deployLogger); | private StorageNode buildSingleNode(DeployState deployState, ContentCluster parent) {
int distributionKey = 0;
StorageNode searchNode = new StorageNode(deployState.getProperties(), parent.getStorageCluster(), 1.0, distributionKey , false);
searchNode.setHostResource(parent.hostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC));
PersistenceEngine provider = parent.getPersistence().create(deployState, searchNode, storageGroup, null);
searchNode.initService(deployLogger);
Distributor distributor = new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider);
distributor.setHostResource(searchNode.getHostResource());
distributor.initService(deployLogger);
return searchNode;
} | class GroupBuilder {
private final StorageGroup storageGroup;
/* The explicitly defined subgroups of this */
private final List<GroupBuilder> subGroups;
private final List<XmlNodeBuilder> nodeBuilders;
/** The nodes explicitly specified as a nodes tag in this group, or empty if none */
private final Optional<NodesSpecification> nodeRequirement;
private final DeployLogger deployLogger;
private GroupBuilder(StorageGroup storageGroup, List<GroupBuilder> subGroups, List<XmlNodeBuilder> nodeBuilders,
Optional<NodesSpecification> nodeRequirement, DeployLogger deployLogger) {
this.storageGroup = storageGroup;
this.subGroups = subGroups;
this.nodeBuilders = nodeBuilders;
this.nodeRequirement = nodeRequirement;
this.deployLogger = deployLogger;
}
/**
* Builds a storage group for a nonhosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildNonHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildNonHosted(deployState, owner, Optional.of(this)));
}
for (XmlNodeBuilder nodeBuilder : nodeBuilders) {
storageGroup.nodes.add(nodeBuilder.build(deployState, owner, storageGroup));
}
if (parent.isEmpty() && subGroups.isEmpty() && nodeBuilders.isEmpty()) {
storageGroup.nodes.add(buildSingleNode(deployState, owner));
}
return storageGroup;
}
/**
* Builds a storage group for a hosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
if (storageGroup.getIndex() != null)
throw new IllegalArgumentException("Specifying individual groups is not supported on hosted applications");
Map<HostResource, ClusterMembership> hostMapping =
nodeRequirement.isPresent() ?
provisionHosts(nodeRequirement.get(), owner.getStorageCluster().getClusterName(), owner.getRoot().hostSystem(), deployLogger) :
Collections.emptyMap();
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroups = collectAllocatedSubgroups(hostMapping);
if (hostGroups.size() > 1) {
if (parent.isPresent())
throw new IllegalArgumentException("Cannot specify groups using the groups attribute in nested content groups");
for (Map.Entry<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroup : hostGroups.entrySet()) {
String groupIndex = String.valueOf(hostGroup.getKey().get().index());
StorageGroup subgroup = new StorageGroup(true, groupIndex, groupIndex);
for (Map.Entry<HostResource, ClusterMembership> host : hostGroup.getValue().entrySet()) {
subgroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), subgroup, host.getValue()));
}
storageGroup.subgroups.add(subgroup);
}
}
else {
for (Map.Entry<HostResource, ClusterMembership> host : hostMapping.entrySet()) {
storageGroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), storageGroup, host.getValue()));
}
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildHosted(deployState, owner, Optional.of(this)));
}
}
return storageGroup;
}
/** Collect hosts per group */
private Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> collectAllocatedSubgroups(Map<HostResource, ClusterMembership> hostMapping) {
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostsPerGroup = new LinkedHashMap<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hostMapping.entrySet()) {
Optional<ClusterSpec.Group> group = entry.getValue().cluster().group();
Map<HostResource, ClusterMembership> hostsInGroup = hostsPerGroup.get(group);
if (hostsInGroup == null) {
hostsInGroup = new LinkedHashMap<>();
hostsPerGroup.put(group, hostsInGroup);
}
hostsInGroup.put(entry.getKey(), entry.getValue());
}
return hostsPerGroup;
}
} | class GroupBuilder {
private final StorageGroup storageGroup;
/* The explicitly defined subgroups of this */
private final List<GroupBuilder> subGroups;
private final List<XmlNodeBuilder> nodeBuilders;
/** The nodes explicitly specified as a nodes tag in this group, or empty if none */
private final Optional<NodesSpecification> nodeRequirement;
private final DeployLogger deployLogger;
private GroupBuilder(StorageGroup storageGroup, List<GroupBuilder> subGroups, List<XmlNodeBuilder> nodeBuilders,
Optional<NodesSpecification> nodeRequirement, DeployLogger deployLogger) {
this.storageGroup = storageGroup;
this.subGroups = subGroups;
this.nodeBuilders = nodeBuilders;
this.nodeRequirement = nodeRequirement;
this.deployLogger = deployLogger;
}
/**
* Builds a storage group for a nonhosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildNonHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildNonHosted(deployState, owner, Optional.of(this)));
}
for (XmlNodeBuilder nodeBuilder : nodeBuilders) {
storageGroup.nodes.add(nodeBuilder.build(deployState, owner, storageGroup));
}
if (parent.isEmpty() && subGroups.isEmpty() && nodeBuilders.isEmpty()) {
storageGroup.nodes.add(buildSingleNode(deployState, owner));
}
return storageGroup;
}
/**
* Builds a storage group for a hosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
if (storageGroup.getIndex() != null)
throw new IllegalArgumentException("Specifying individual groups is not supported on hosted applications");
Map<HostResource, ClusterMembership> hostMapping =
nodeRequirement.isPresent() ?
provisionHosts(nodeRequirement.get(), owner.getStorageCluster().getClusterName(), owner.getRoot().hostSystem(), deployLogger) :
Collections.emptyMap();
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroups = collectAllocatedSubgroups(hostMapping);
if (hostGroups.size() > 1) {
if (parent.isPresent())
throw new IllegalArgumentException("Cannot specify groups using the groups attribute in nested content groups");
for (Map.Entry<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroup : hostGroups.entrySet()) {
String groupIndex = String.valueOf(hostGroup.getKey().get().index());
StorageGroup subgroup = new StorageGroup(true, groupIndex, groupIndex);
for (Map.Entry<HostResource, ClusterMembership> host : hostGroup.getValue().entrySet()) {
subgroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), subgroup, host.getValue()));
}
storageGroup.subgroups.add(subgroup);
}
}
else {
for (Map.Entry<HostResource, ClusterMembership> host : hostMapping.entrySet()) {
storageGroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), storageGroup, host.getValue()));
}
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildHosted(deployState, owner, Optional.of(this)));
}
}
return storageGroup;
}
/** Collect hosts per group */
private Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> collectAllocatedSubgroups(Map<HostResource, ClusterMembership> hostMapping) {
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostsPerGroup = new LinkedHashMap<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hostMapping.entrySet()) {
Optional<ClusterSpec.Group> group = entry.getValue().cluster().group();
Map<HostResource, ClusterMembership> hostsInGroup = hostsPerGroup.get(group);
if (hostsInGroup == null) {
hostsInGroup = new LinkedHashMap<>();
hostsPerGroup.put(group, hostsInGroup);
}
hostsInGroup.put(entry.getKey(), entry.getValue());
}
return hostsPerGroup;
}
} |
Fixed | private void startShutdownDeadlineExecutor() {
shutdownDeadlineExecutor = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("Shutdown deadline timer"));
shutdownDeadlineExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
long delayMillis = (long)(shudownTimeoutS.get() * 1000.0);
shutdownDeadlineExecutor.schedule(() -> {
if (dumpHeapOnShutdownTimeout.get()) {
String saneConfigId = configId.replaceAll(SANITIZE_FILENAME, "_");
String heapDumpName = Defaults.getDefaults().underVespaHome("var/crash/java_pid.") + santizeFileName(saneConfigId) + "." + ProcessHandle.current().pid() + ".hprof";
com.yahoo.protect.Process.dumpHeap(heapDumpName, true);
}
com.yahoo.protect.Process.logAndDie(
"Timed out waiting for application shutdown. Please check that all your request handlers " +
"drain their request content channels.", true);
}, delayMillis, TimeUnit.MILLISECONDS);
} | String saneConfigId = configId.replaceAll(SANITIZE_FILENAME, "_"); | private void startShutdownDeadlineExecutor() {
shutdownDeadlineExecutor = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("Shutdown deadline timer"));
shutdownDeadlineExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
long delayMillis = (long)(shudownTimeoutS.get() * 1000.0);
shutdownDeadlineExecutor.schedule(() -> {
if (dumpHeapOnShutdownTimeout.get()) {
String heapDumpName = Defaults.getDefaults().underVespaHome("var/crash/java_pid.") + santizeFileName(configId) + "." + ProcessHandle.current().pid() + ".hprof";
com.yahoo.protect.Process.dumpHeap(heapDumpName, true);
}
com.yahoo.protect.Process.logAndDie(
"Timed out waiting for application shutdown. Please check that all your request handlers " +
"drain their request content channels.", true);
}, delayMillis, TimeUnit.MILLISECONDS);
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} | class to be loaded,
* which runs the static block.
*/
@SuppressWarnings("UnusedDeclaration")
public static void ensureVespaLoggingInitialized() {
} |
Use putIfAbsent instead. | public void populateWithStructFields(SDDocumentType sdoc, String name, DataType dataType, int recursion) {
DataType dt = getFirstStructOrMapRecursive();
if (dt == null) return;
if (dataType instanceof MapDataType) {
MapDataType mdt = (MapDataType) dataType;
SDField keyField = new SDField(sdoc, name.concat(".key"), mdt.getKeyType(),
getOwnerDocType(), new Matching(), true, recursion + 1);
structFields.put("key", keyField);
SDField valueField = new SDField(sdoc, name.concat(".value"), mdt.getValueType(),
getOwnerDocType(), new Matching(), true, recursion + 1);
structFields.put("value", valueField);
} else {
if (recursion >= 10) return;
if (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
if (dataType instanceof StructDataType) {
SDDocumentType subType = sdoc != null ? sdoc.getType(dataType.getName()) : null;
if (subType == null) {
throw new IllegalArgumentException("Could not find struct '" + dataType.getName() + "'.");
}
for (Field field : subType.fieldSet()) {
SDField subField = new SDField(sdoc, name.concat(".").concat(field.getName()), field.getDataType(),
subType, new Matching(), true, recursion + 1);
if (!structFields.containsKey(field.getName())) {
structFields.put(field.getName(), subField);
}
}
}
}
} | if (!structFields.containsKey(field.getName())) { | public void populateWithStructFields(SDDocumentType sdoc, String name, DataType dataType, int recursion) {
DataType dt = getFirstStructOrMapRecursive();
if (dt == null) return;
if (dataType instanceof MapDataType) {
MapDataType mdt = (MapDataType) dataType;
SDField keyField = new SDField(sdoc, name.concat(".key"), mdt.getKeyType(),
getOwnerDocType(), new Matching(), true, recursion + 1);
structFields.put("key", keyField);
SDField valueField = new SDField(sdoc, name.concat(".value"), mdt.getValueType(),
getOwnerDocType(), new Matching(), true, recursion + 1);
structFields.put("value", valueField);
} else {
if (recursion >= 10) return;
if (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
if (dataType instanceof StructDataType) {
SDDocumentType subType = sdoc != null ? sdoc.getType(dataType.getName()) : null;
if (subType == null) {
throw new IllegalArgumentException("Could not find struct '" + dataType.getName() + "'.");
}
for (Field field : subType.fieldSet()) {
SDField subField = new SDField(sdoc, name.concat(".").concat(field.getName()), field.getDataType(),
subType, new Matching(), true, recursion + 1);
structFields.putIfAbsent(field.getName(), subField);
}
}
}
} | class SDField extends Field implements TypedKey, FieldOperationContainer, ImmutableSDField {
/** Use this field for modifying index-structure, even if it doesn't have any indexing code */
private boolean indexStructureField = false;
/** The indexing statements to be applied to this value during indexing */
private ScriptExpression indexingScript = new ScriptExpression();
/** The default rank type for indices of this field */
private RankType rankType = RankType.DEFAULT;
/** Rank settings in a "rank" block for the field. */
private final Ranking ranking = new Ranking();
/**
* The literal boost of this field. This boost is added to a rank score
* when a query term matched as query term exactly (unnormalized and unstemmed).
* Non-positive boosts causes no boosting, 0 allows boosts
* to be specified in other rank profiles, while negative values
* turns the capability off.
*/
private int literalBoost = -1;
/**
* The weight of this field. This is a percentage,
* so 100 is default to provide the identity transform.
*/
private int weight = 100;
/**
* Indicates what kind of matching should be done on this field
*/
private Matching matching = new Matching();
private Dictionary dictionary = null;
/** Attribute settings, or null if there are none */
private final Map<String, Attribute> attributes = new TreeMap<>();
/**
* The stemming setting of this field, or null to use the default.
* Default is determined by the owning search definition.
*/
private Stemming stemming = null;
/** How content of this field should be accent normalized etc. */
private NormalizeLevel normalizing = new NormalizeLevel();
/** Extra query commands of this field */
private final List<String> queryCommands = new java.util.ArrayList<>(0);
/** Summary fields defined in this field */
private final Map<String, SummaryField> summaryFields = new java.util.LinkedHashMap<>(0);
/** The explicitly index settings on this field */
private final Map<String, Index> indices = new java.util.LinkedHashMap<>();
private boolean idOverride = false;
/** Struct fields defined in this field */
private final Map<String,SDField> structFields = new java.util.LinkedHashMap<>(0);
/** The document that this field was declared in, or null*/
private SDDocumentType ownerDocType = null;
/** The aliases declared for this field. May pertain to indexes or attributes */
private final Map<String, String> aliasToName = new HashMap<>();
/** Pending operations that must be applied after parsing, due to use of not-yet-defined structs. */
private final List<FieldOperation> pendingOperations = new LinkedList<>();
private boolean isExtraField = false;
private boolean wasConfiguredToDoAttributing = false;
/**
* Creates a new field. This method is only used to create reserved fields.
*
* @param name the name of the field
* @param dataType the datatype of the field
*/
protected SDField(SDDocumentType repo, String name, int id, DataType dataType, boolean populate) {
super(name, id, dataType);
populate(populate, repo, name, dataType);
}
public SDField(SDDocumentType repo, String name, int id, DataType dataType) {
this(repo, name, id, dataType, true);
}
/** Creates a new field */
public SDField(SDDocumentType repo, String name, DataType dataType, boolean populate) {
super(name, dataType);
populate(populate, repo, name, dataType);
}
/** Creates a new field */
protected SDField(SDDocumentType repo, String name, DataType dataType, SDDocumentType owner, boolean populate) {
super(name, dataType, owner == null ? null : owner.getDocumentType());
this.ownerDocType = owner;
populate(populate, repo, name, dataType);
}
/**
* Creates a new field
*
* @param name the name of the field
* @param dataType the datatype of the field
* @param owner the owning document (used to check for id collisions)
* @param fieldMatching the matching object to set for the field
*/
protected SDField(SDDocumentType repo, String name, DataType dataType, SDDocumentType owner,
Matching fieldMatching, boolean populate, int recursion) {
super(name, dataType, owner == null ? null : owner.getDocumentType());
this.ownerDocType = owner;
if (fieldMatching != null)
this.setMatching(fieldMatching);
populate(populate, repo, name, dataType, fieldMatching, recursion);
}
public SDField(SDDocumentType repo, String name, DataType dataType) {
this(repo, name,dataType, true);
}
public SDField(String name, DataType dataType) {
this(null, name,dataType);
}
// Populates derived state (query commands, struct sub-fields) for this field.
private void populate(boolean populate, SDDocumentType repo, String name, DataType dataType) {
populate(populate, repo, name, dataType, null, 0);
}
// Full variant: also propagates matching settings into struct sub-fields.
// 'recursion' guards against unbounded nesting when populating struct fields.
private void populate(boolean populate, SDDocumentType repo, String name, DataType dataType, Matching fieldMatching, int recursion) {
if (dataType instanceof TensorDataType) {
TensorType type = ((TensorDataType)dataType).getTensorType();
// Indexed dimensions without an explicit size cannot be represented as dense tensors.
if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
throw new IllegalArgumentException("Illegal type in field " + name + " type " + type +
": Dense tensor dimensions must have a size");
addQueryCommand("type " + type);
}
else {
addQueryCommand("type " + dataType.getName());
}
// Maps always get struct population, even when 'populate' is false.
if (populate || (dataType instanceof MapDataType)) {
populateWithStructFields(repo, name, dataType, recursion);
populateWithStructMatching(repo, name, dataType, fieldMatching);
}
}
/** Marks this as a field declared outside the document block. */
public void setIsExtraField(boolean isExtra) {
isExtraField = isExtra;
}
@Override
public boolean isExtraField() {
return isExtraField;
}
@Override
public boolean isImportedField() {
return false;
}
/** Returns whether the indexing script currently contains an attribute expression. */
@Override
public boolean doesAttributing() {
return containsExpression(AttributeExpression.class);
}
/** Returns whether the indexing script currently contains an index expression. */
@Override
public boolean doesIndexing() {
return containsExpression(IndexExpression.class);
}
/** Returns whether this field, or any of its struct sub-fields, produces a summary. */
public boolean doesSummarying() {
if (usesStruct()) {
for (SDField structField : getStructFields()) {
if (structField.doesSummarying()) {
return true;
}
}
}
return containsExpression(SummaryExpression.class);
}
@Override
public boolean doesLowerCasing() {
return containsExpression(LowerCaseExpression.class);
}
/** Returns whether the indexing script contains an expression of the given type. */
@Override
public <T extends Expression> boolean containsExpression(Class<T> searchFor) {
return findExpression(searchFor) != null;
}
// Returns the first expression of the given type found in the indexing script, or null.
private <T extends Expression> T findExpression(Class<T> searchFor) {
return new ExpressionSearcher<>(searchFor).searchIn(indexingScript);
}
/**
 * Adds this field (or, for structs, its summarying sub-fields' destinations) as sources
 * of the given summary field.
 */
public void addSummaryFieldSources(SummaryField summaryField) {
if (usesStruct()) {
/*
 * How this works for structs: When at least one sub-field in a struct is to
 * be used for summary, that whole struct field is included in summary.cfg. Then,
 * vsmsummary.cfg specifies the sub-fields used for each struct field.
 * So we recurse into each struct, adding the destination classes set for each sub-field
 * to the main summary-field for the struct field.
 */
for (SDField structField : getStructFields()) {
for (SummaryField sumF : structField.getSummaryFields().values()) {
for (String dest : sumF.getDestinations()) {
summaryField.addDestination(dest);
}
}
structField.addSummaryFieldSources(summaryField);
}
} else {
if (doesSummarying()) {
summaryField.addSource(getName());
}
}
}
/**
 * Propagates matching settings down into struct/map sub-fields: each sub-field's matching
 * becomes the merge of the super-field's matching and the sub-field's own declared matching.
 * No-op unless this field (possibly through collections) wraps a struct or map.
 */
public void populateWithStructMatching(SDDocumentType sdoc, String name, DataType dataType,
                                       Matching superFieldMatching) {
DataType dt = getFirstStructOrMapRecursive();
if (dt == null) return;
if (dataType instanceof MapDataType) {
MapDataType mdt = (MapDataType) dataType;
// Maps get synthetic ".key" and ".value" sub-fields; propagate matching into both.
Matching keyFieldMatching = new Matching();
if (superFieldMatching != null) {
keyFieldMatching.merge(superFieldMatching);
}
SDField keyField = structFields.get(name.concat(".key"));
if (keyField != null) {
keyField.populateWithStructMatching(sdoc, name.concat(".key"), mdt.getKeyType(), keyFieldMatching);
keyField.setMatching(keyFieldMatching);
}
Matching valueFieldMatching = new Matching();
if (superFieldMatching != null) {
valueFieldMatching.merge(superFieldMatching);
}
SDField valueField = structFields.get(name.concat(".value"));
if (valueField != null) {
valueField.populateWithStructMatching(sdoc, name.concat(".value"), mdt.getValueType(),
valueFieldMatching);
valueField.setMatching(valueFieldMatching);
}
} else {
// Unwrap one collection level (array<struct>, weightedset<struct>, ...).
if (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
if (dataType instanceof StructDataType) {
SDDocumentType subType = sdoc != null ? sdoc.getType(dataType.getName()) : null;
if (subType != null) {
for (Field f : subType.fieldSet()) {
if (f instanceof SDField) {
SDField field = (SDField) f;
// Super-field matching first, then the sub-field's own matching on top.
Matching subFieldMatching = new Matching();
if (superFieldMatching != null) {
subFieldMatching.merge(superFieldMatching);
}
subFieldMatching.merge(field.getMatching());
SDField subField = structFields.get(field.getName());
if (subField != null) {
subField.populateWithStructMatching(sdoc, name.concat(".").concat(field.getName()), field.getDataType(),
subFieldMatching);
subField.setMatching(subFieldMatching);
}
} else {
throw new IllegalArgumentException("Field in struct is not SDField " + f.getName());
}
}
} else {
throw new IllegalArgumentException("Could not find struct " + dataType.getName());
}
}
}
}
/** Queues an operation to be applied to this field after parsing completes. */
public void addOperation(FieldOperation op) {
pendingOperations.add(op);
}
/** Applies and drains all pending operations, in their natural (sorted) order. */
@Override
public void applyOperations(SDField field) {
if (pendingOperations.isEmpty()) return;
Collections.sort(pendingOperations);
ListIterator<FieldOperation> ops = pendingOperations.listIterator();
while (ops.hasNext()) {
FieldOperation op = ops.next();
// Remove before applying so the list never retains an already-applied operation.
ops.remove();
op.apply(field);
}
}
/** Applies all pending operations to this field itself. */
public void applyOperations() {
applyOperations(this);
}
/** Sets an explicit field id; suppresses automatic id calculation in setDataType(). */
public void setId(int fieldId, DocumentType owner) {
super.setId(fieldId, owner);
idOverride = true;
}
/**
 * Unwraps collection and map layers of this field's type and returns the first
 * struct type encountered, or null if the innermost type is not a struct.
 * For maps, only the value side is followed.
 */
public StructDataType getFirstStructRecursive() {
    DataType current = getDataType();
    boolean unwrapping = true;
    while (unwrapping) {
        if (current instanceof CollectionDataType) {
            current = ((CollectionDataType) current).getNestedType();
        } else if (current instanceof MapDataType) {
            current = ((MapDataType) current).getValueType();
        } else {
            unwrapping = false;
        }
    }
    if (current instanceof StructDataType) {
        return (StructDataType) current;
    }
    return null;
}
// Unwraps collection layers only; returns the result if it is a struct or map, else null.
// Note: unlike getFirstStructRecursive(), this does NOT descend into map value types.
private DataType getFirstStructOrMapRecursive() {
DataType dataType = getDataType();
while (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
return (dataType instanceof StructDataType || dataType instanceof MapDataType) ? dataType : null;
}
// True if this field (possibly through collections/maps) wraps a struct.
private boolean usesStruct() {
DataType dt = getFirstStructRecursive();
return (dt != null);
}
/** True if this field (possibly through collections) wraps a struct or map. */
@Override
public boolean usesStructOrMap() {
DataType dt = getFirstStructOrMapRecursive();
return (dt != null);
}
/** True if the first non-empty indexing script set on this field did attributing. */
@Override
public boolean wasConfiguredToDoAttributing() {
return wasConfiguredToDoAttributing;
}
/** Parse an indexing expression which will use the simple linguistics implementation, suitable for testing. */
public void parseIndexingScript(String script) {
parseIndexingScript(script, new SimpleLinguistics(), Embedder.throwsOnUse);
}
/**
 * Parses the given indexing script and installs it on this field.
 *
 * @param script the indexing script source to parse
 * @param linguistics the linguistics implementation to bind into the script
 * @param embedder the embedder to bind into the script
 * @throws RuntimeException wrapping the ParseException if the script cannot be parsed
 */
public void parseIndexingScript(String script, Linguistics linguistics, Embedder embedder) {
    try {
        ScriptParserContext config = new ScriptParserContext(linguistics, embedder);
        config.setInputStream(new IndexingInput(script));
        setIndexingScript(ScriptExpression.newInstance(config));
    } catch (ParseException e) {
        // Fixed typo in the error message: "parser" -> "parse".
        throw new RuntimeException("Failed to parse script '" + script + "'.", e);
    }
}
/** Sets the indexing script of this, or null to not use a script */
public void setIndexingScript(ScriptExpression exp) {
if (exp == null) {
exp = new ScriptExpression();
}
indexingScript = exp;
if (indexingScript.isEmpty()) {
return;
}
// Latch: remember if any non-empty script ever did attributing on this field.
if (!wasConfiguredToDoAttributing()) {
wasConfiguredToDoAttributing = doesAttributing();
}
// For non-struct fields, materialize an Attribute object for each attribute expression
// in the script that does not already have one.
if (!usesStructOrMap()) {
new ExpressionVisitor() {
@Override
protected void doVisit(Expression exp) {
if (!(exp instanceof AttributeExpression)) {
return;
}
String fieldName = ((AttributeExpression)exp).getFieldName();
if (fieldName == null) {
fieldName = getName();
}
Attribute attribute = attributes.get(fieldName);
if (attribute == null) {
addAttribute(new Attribute(fieldName, getDataType()));
}
}
}.visit(indexingScript);
}
// Propagate the same script to all struct sub-fields.
for (SDField structField : getStructFields()) {
structField.setIndexingScript(exp);
}
}
@Override
public ScriptExpression getIndexingScript() { return indexingScript; }
@SuppressWarnings("deprecation")
@Override
public void setDataType(DataType type) {
// URIs are always lowercased and never stemmed.
if (type.equals(DataType.URI)) {
normalizing.inferLowercase();
stemming = Stemming.NONE;
}
this.dataType = type;
// Recompute the field id unless an explicit id was set via setId().
if ( ! idOverride) {
this.fieldId = calculateIdV7(null);
}
}
@Override
public boolean isIndexStructureField() {
return indexStructureField;
}
/** Marks this field as contributing to index structure even without indexing code. */
public void setIndexStructureField(boolean indexStructureField) {
this.indexStructureField = indexStructureField;
}
/** True if this field has an indexing script which does indexing. */
@Override
public boolean hasIndex() {
return (getIndexingScript() != null) && doesIndexing();
}
/** Sets the literal boost of this field */
public void setLiteralBoost(int literalBoost) { this.literalBoost=literalBoost; }
/**
 * Returns the literal boost of this field. This boost is added to a literal score
 * when a query term matched as query term exactly (unnormalized and unstemmed).
 * Default is non-positive.
 */
@Override
public int getLiteralBoost() { return literalBoost; }
/** Sets the weight of this field */
public void setWeight(int weight) { this.weight=weight; }
/** Returns the weight of this field, or 0 if nothing is set */
@Override
public int getWeight() { return weight; }
/**
 * Returns what kind of matching type should be applied.
 */
@Override
public Matching getMatching() { return matching; }
/**
 * Sets what kind of matching type should be applied.
 * (Token matching is default, PREFIX, SUBSTRING, SUFFIX are alternatives)
 */
public void setMatching(Matching matching) { this.matching=matching; }
/**
 * Returns Dictionary settings, or null if none are set.
 */
public Dictionary getDictionary() { return dictionary; }
/** Returns the dictionary settings, creating them first if not yet present. */
public Dictionary getOrSetDictionary() {
if (dictionary == null) {
dictionary = new Dictionary();
}
return dictionary;
}
/**
 * Set the matching type for this field and all subfields (recursively).
 */
public void setMatchingType(Matching.Type type) {
this.getMatching().setType(type);
for (SDField structField : getStructFields()) {
structField.setMatchingType(type);
}
}
/**
 * Set the matching case for this field and all subfields (recursively).
 */
public void setMatchingCase(Case casing) {
this.getMatching().setCase(casing);
for (SDField structField : getStructFields()) {
structField.setMatchingCase(casing);
}
}
/**
 * Set matching algorithm for this field and all subfields.
 * Recurses like {@link #setMatchingType} and {@link #setMatchingCase}; the previous
 * implementation only set the algorithm one level down, leaving nested struct
 * sub-fields untouched — inconsistent with the other two matching setters.
 */
public void setMatchingAlgorithm(Matching.Algorithm algorithm) {
    this.getMatching().setAlgorithm(algorithm);
    for (SDField structField : getStructFields()) {
        structField.setMatchingAlgorithm(algorithm);
    }
}
/** Adds an explicit index defined in this field */
public void addIndex(Index index) {
indices.put(index.getName(),index);
}
/**
 * Returns an index, or null if no index with this name has had
 * some <b>explicit settings</b> applied in this field (even if this returns null,
 * the index may be implicitly defined by an indexing statement)
 */
@Override
public Index getIndex(String name) {
return indices.get(name);
}
/**
 * Returns an index if this field has one (implicitly or
 * explicitly) targeting the given name.
 */
@Override
public boolean existsIndex(String name) {
if (indices.get(name) != null) return true;
// A field doing indexing implicitly defines an index with its own name.
return name.equals(getName()) && doesIndexing();
}
/**
 * Defined indices on this field
 * @return defined indices on this
 */
@Override
public Map<String, Index> getIndices() {
return indices;
}
/**
 * Sets the default rank type of this field's indices, and assigns it to every index
 * explicitly defined here which does not yet have a rank type of its own.
 * (This complex behavior is due to the fact that we would prefer to have rank types
 * per field, not per index.)
 */
public void setRankType(RankType rankType) {
    this.rankType = rankType;
    for (Index definedIndex : getIndices().values()) {
        if (definedIndex.getRankType() != null) continue;  // explicit setting wins
        definedIndex.setRankType(rankType);
    }
}
/** Returns the rank settings set in a "rank" block for this field. This is never null. */
@Override
public Ranking getRanking() { return ranking; }
/** Returns the default rank type of indices of this field, or null if nothing is set */
@Override
public RankType getRankType() { return this.rankType; }
/**
 * Returns the search-time attribute settings of this field or null if none is set.
 *
 * <p>TODO: Make unmodifiable.</p>
 */
@Override
public Map<String, Attribute> getAttributes() { return attributes; }
/** Returns the attribute with the same name as this field, or null if none. */
public Attribute getAttribute() {
return attributes.get(getName());
}
/** Adds an attribute; an attribute without a name inherits this field's name. */
public void addAttribute(Attribute attribute) {
String name = attribute.getName();
if (name == null || "".equals(name)) {
name = getName();
attribute.setName(name);
}
attributes.put(attribute.getName(),attribute);
}
/**
 * Returns the stemming setting of this field.
 * Default is determined by the owning search definition.
 *
 * @return the stemming setting of this, or null, to use the default
 */
@Override
public Stemming getStemming() { return stemming; }
/**
 * Whether this field should be stemmed in this search definition:
 * the field's own setting if present, otherwise the search definition's default.
 */
@Override
public Stemming getStemming(Search search) {
if (stemming != null)
return stemming;
else
return search.getStemming();
}
@Override
public Field asField() {
return this;
}
/**
 * Sets how this field should be stemmed, or set to null to use the default.
 */
public void setStemming(Stemming stemming) {
this.stemming = stemming;
}
/** Returns an unmodifiable map of the summary fields defined in this */
@Override
public Map<String, SummaryField> getSummaryFields() {
return Collections.unmodifiableMap(summaryFields);
}
/** Removes all summary fields defined in this. */
public void removeSummaryFields() {
summaryFields.clear();
}
/** Adds a summary field, replacing any existing one with the same name. */
public void addSummaryField(SummaryField summaryField) {
summaryFields.put(summaryField.getName(),summaryField);
}
/**
 * Returns a summary field defined (implicitly or explicitly) by this field.
 * Returns null if there is no such summary field defined.
 */
@Override
public SummaryField getSummaryField(String name) {
return summaryFields.get(name);
}
/**
 * Returns a summary field defined (implicitly or explicitly) by this field.
 *
 * @param create true to create the summary field and add it to this field before returning if it is missing
 * @return the summary field, or null if not present and create is false
 */
public SummaryField getSummaryField(String name, boolean create) {
    SummaryField summaryField = summaryFields.get(name);
    if (summaryField == null && create) {
        summaryField = new SummaryField(name, getDataType());
        addSummaryField(summaryField);
    }
    // Return the local directly instead of performing a redundant second map lookup.
    return summaryField;
}
/** Returns the static struct sub-fields of this field (empty for non-struct fields). */
@Override
public Collection<SDField> getStructFields() { return structFields.values(); }
/**
 * Returns a struct field defined in this field, potentially traversing into
 * nested structs when the name is dotted (e.g. "outer.inner").
 * Returns null if there is no such struct field defined.
 */
@Override
public SDField getStructField(String name) {
    int firstDot = name.indexOf(".");
    if (firstDot < 0) {
        return structFields.get(name);
    }
    SDField outer = structFields.get(name.substring(0, firstDot));
    if (outer == null) {
        return null;
    }
    return outer.getStructField(name.substring(firstDot + 1));
}
/**
 * Returns how the content of this field should be accent normalized etc
 */
@Override
public NormalizeLevel getNormalizing() { return normalizing; }
/**
 * Change how the content of this field should be accent normalized etc
 */
public void setNormalizing(NormalizeLevel level) { normalizing = level; }
/** Adds a query command to this field. */
public void addQueryCommand(String name) {
queryCommands.add(name);
}
/** Returns whether the given query command has been added to this field. */
public boolean hasQueryCommand(String name) {
return queryCommands.contains(name);
}
/** Returns a list of query commands */
@Override
public List<String> getQueryCommands() { return queryCommands; }
/** Returns the document that this field was declared in, or null */
private SDDocumentType getOwnerDocType() {
return ownerDocType;
}
@Override
public boolean equals(Object other) {
if ( ! (other instanceof SDField)) return false;
return super.equals(other);
}
// NOTE(review): hashCode() is based on getName() while equals() delegates to super —
// presumably Field.equals compares by name so the contract holds; verify in Field.
@Override
public int hashCode() {
return getName().hashCode();
}
@Override
public String toString() {
return "field '" + getName() + "'";
}
/** The aliases declared for this field */
@Override
public Map<String, String> getAliasToName() {
return aliasToName;
}
/** True if docproc may run the full indexing pipeline for this field. */
@Override
public boolean hasFullIndexingDocprocRights() {
Attribute self = getAttributes().get(getName());
return (!isExtraField() || ((self != null) && self.isMutable()));
}
} | class SDField extends Field implements TypedKey, FieldOperationContainer, ImmutableSDField {
/** Use this field for modifying index-structure, even if it doesn't have any indexing code */
private boolean indexStructureField = false;
/** The indexing statements to be applied to this value during indexing */
private ScriptExpression indexingScript = new ScriptExpression();
/** The default rank type for indices of this field */
private RankType rankType = RankType.DEFAULT;
/** Rank settings in a "rank" block for the field. */
private final Ranking ranking = new Ranking();
/**
 * The literal boost of this field. This boost is added to a rank score
 * when a query term matched as query term exactly (unnormalized and unstemmed).
 * Non-positive boosts causes no boosting, 0 allows boosts
 * to be specified in other rank profiles, while negative values
 * turns the capability off.
 */
private int literalBoost = -1;
/**
 * The weight of this field. This is a percentage,
 * so 100 is default to provide the identity transform.
 */
private int weight = 100;
/**
 * Indicates what kind of matching should be done on this field
 */
private Matching matching = new Matching();
// Dictionary settings; null until explicitly set (see getOrSetDictionary()).
private Dictionary dictionary = null;
/** Attribute settings, or null if there are none */
private final Map<String, Attribute> attributes = new TreeMap<>();
/**
 * The stemming setting of this field, or null to use the default.
 * Default is determined by the owning search definition.
 */
private Stemming stemming = null;
/** How content of this field should be accent normalized etc. */
private NormalizeLevel normalizing = new NormalizeLevel();
/** Extra query commands of this field */
private final List<String> queryCommands = new java.util.ArrayList<>(0);
/** Summary fields defined in this field */
private final Map<String, SummaryField> summaryFields = new java.util.LinkedHashMap<>(0);
/** The explicitly index settings on this field */
private final Map<String, Index> indices = new java.util.LinkedHashMap<>();
// True when an explicit field id was set; suppresses automatic id calculation.
private boolean idOverride = false;
/** Struct fields defined in this field */
private final Map<String,SDField> structFields = new java.util.LinkedHashMap<>(0);
/** The document that this field was declared in, or null*/
private SDDocumentType ownerDocType = null;
/** The aliases declared for this field. May pertain to indexes or attributes */
private final Map<String, String> aliasToName = new HashMap<>();
/** Pending operations that must be applied after parsing, due to use of not-yet-defined structs. */
private final List<FieldOperation> pendingOperations = new LinkedList<>();
// True when this field is declared outside the document block ("extra" field).
private boolean isExtraField = false;
// Latches to true the first time a non-empty indexing script contains an attribute expression.
private boolean wasConfiguredToDoAttributing = false;
/**
 * Creates a new field. This method is only used to create reserved fields.
 *
 * @param name the name of the field
 * @param dataType the datatype of the field
 */
protected SDField(SDDocumentType repo, String name, int id, DataType dataType, boolean populate) {
super(name, id, dataType);
populate(populate, repo, name, dataType);
}
/** Creates a reserved field with struct population enabled. */
public SDField(SDDocumentType repo, String name, int id, DataType dataType) {
this(repo, name, id, dataType, true);
}
/** Creates a new field */
public SDField(SDDocumentType repo, String name, DataType dataType, boolean populate) {
super(name, dataType);
populate(populate, repo, name, dataType);
}
/** Creates a new field */
protected SDField(SDDocumentType repo, String name, DataType dataType, SDDocumentType owner, boolean populate) {
super(name, dataType, owner == null ? null : owner.getDocumentType());
this.ownerDocType = owner;
populate(populate, repo, name, dataType);
}
/**
 * Creates a new field
 *
 * @param name the name of the field
 * @param dataType the datatype of the field
 * @param owner the owning document (used to check for id collisions)
 * @param fieldMatching the matching object to set for the field
 */
protected SDField(SDDocumentType repo, String name, DataType dataType, SDDocumentType owner,
                  Matching fieldMatching, boolean populate, int recursion) {
super(name, dataType, owner == null ? null : owner.getDocumentType());
this.ownerDocType = owner;
// Matching must be set before populate() so struct sub-fields inherit it.
if (fieldMatching != null)
this.setMatching(fieldMatching);
populate(populate, repo, name, dataType, fieldMatching, recursion);
}
/** Convenience: populating constructor without an owner document. */
public SDField(SDDocumentType repo, String name, DataType dataType) {
this(repo, name,dataType, true);
}
/** Convenience: field without a type repository (used e.g. in tests). */
public SDField(String name, DataType dataType) {
this(null, name,dataType);
}
// Populates derived state (query commands, struct sub-fields) for this field.
private void populate(boolean populate, SDDocumentType repo, String name, DataType dataType) {
populate(populate, repo, name, dataType, null, 0);
}
// Full variant: also propagates matching settings into struct sub-fields.
// 'recursion' guards against unbounded nesting when populating struct fields.
private void populate(boolean populate, SDDocumentType repo, String name, DataType dataType, Matching fieldMatching, int recursion) {
if (dataType instanceof TensorDataType) {
TensorType type = ((TensorDataType)dataType).getTensorType();
// Indexed dimensions without an explicit size cannot be represented as dense tensors.
if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
throw new IllegalArgumentException("Illegal type in field " + name + " type " + type +
": Dense tensor dimensions must have a size");
addQueryCommand("type " + type);
}
else {
addQueryCommand("type " + dataType.getName());
}
// Maps always get struct population, even when 'populate' is false.
if (populate || (dataType instanceof MapDataType)) {
populateWithStructFields(repo, name, dataType, recursion);
populateWithStructMatching(repo, name, dataType, fieldMatching);
}
}
/** Marks this as a field declared outside the document block. */
public void setIsExtraField(boolean isExtra) {
isExtraField = isExtra;
}
@Override
public boolean isExtraField() {
return isExtraField;
}
@Override
public boolean isImportedField() {
return false;
}
/** Returns whether the indexing script currently contains an attribute expression. */
@Override
public boolean doesAttributing() {
return containsExpression(AttributeExpression.class);
}
/** Returns whether the indexing script currently contains an index expression. */
@Override
public boolean doesIndexing() {
return containsExpression(IndexExpression.class);
}
/** Returns whether this field, or any of its struct sub-fields, produces a summary. */
public boolean doesSummarying() {
if (usesStruct()) {
for (SDField structField : getStructFields()) {
if (structField.doesSummarying()) {
return true;
}
}
}
return containsExpression(SummaryExpression.class);
}
@Override
public boolean doesLowerCasing() {
return containsExpression(LowerCaseExpression.class);
}
/** Returns whether the indexing script contains an expression of the given type. */
@Override
public <T extends Expression> boolean containsExpression(Class<T> searchFor) {
return findExpression(searchFor) != null;
}
// Returns the first expression of the given type found in the indexing script, or null.
private <T extends Expression> T findExpression(Class<T> searchFor) {
return new ExpressionSearcher<>(searchFor).searchIn(indexingScript);
}
/**
 * Adds this field (or, for structs, its summarying sub-fields' destinations) as sources
 * of the given summary field.
 */
public void addSummaryFieldSources(SummaryField summaryField) {
if (usesStruct()) {
/*
 * How this works for structs: When at least one sub-field in a struct is to
 * be used for summary, that whole struct field is included in summary.cfg. Then,
 * vsmsummary.cfg specifies the sub-fields used for each struct field.
 * So we recurse into each struct, adding the destination classes set for each sub-field
 * to the main summary-field for the struct field.
 */
for (SDField structField : getStructFields()) {
for (SummaryField sumF : structField.getSummaryFields().values()) {
for (String dest : sumF.getDestinations()) {
summaryField.addDestination(dest);
}
}
structField.addSummaryFieldSources(summaryField);
}
} else {
if (doesSummarying()) {
summaryField.addSource(getName());
}
}
}
/**
 * Propagates matching settings down into struct/map sub-fields: each sub-field's matching
 * becomes the merge of the super-field's matching and the sub-field's own declared matching.
 * No-op unless this field (possibly through collections) wraps a struct or map.
 */
public void populateWithStructMatching(SDDocumentType sdoc, String name, DataType dataType,
                                       Matching superFieldMatching) {
DataType dt = getFirstStructOrMapRecursive();
if (dt == null) return;
if (dataType instanceof MapDataType) {
MapDataType mdt = (MapDataType) dataType;
// Maps get synthetic ".key" and ".value" sub-fields; propagate matching into both.
Matching keyFieldMatching = new Matching();
if (superFieldMatching != null) {
keyFieldMatching.merge(superFieldMatching);
}
SDField keyField = structFields.get(name.concat(".key"));
if (keyField != null) {
keyField.populateWithStructMatching(sdoc, name.concat(".key"), mdt.getKeyType(), keyFieldMatching);
keyField.setMatching(keyFieldMatching);
}
Matching valueFieldMatching = new Matching();
if (superFieldMatching != null) {
valueFieldMatching.merge(superFieldMatching);
}
SDField valueField = structFields.get(name.concat(".value"));
if (valueField != null) {
valueField.populateWithStructMatching(sdoc, name.concat(".value"), mdt.getValueType(),
valueFieldMatching);
valueField.setMatching(valueFieldMatching);
}
} else {
// Unwrap one collection level (array<struct>, weightedset<struct>, ...).
if (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
if (dataType instanceof StructDataType) {
SDDocumentType subType = sdoc != null ? sdoc.getType(dataType.getName()) : null;
if (subType != null) {
for (Field f : subType.fieldSet()) {
if (f instanceof SDField) {
SDField field = (SDField) f;
// Super-field matching first, then the sub-field's own matching on top.
Matching subFieldMatching = new Matching();
if (superFieldMatching != null) {
subFieldMatching.merge(superFieldMatching);
}
subFieldMatching.merge(field.getMatching());
SDField subField = structFields.get(field.getName());
if (subField != null) {
subField.populateWithStructMatching(sdoc, name.concat(".").concat(field.getName()), field.getDataType(),
subFieldMatching);
subField.setMatching(subFieldMatching);
}
} else {
throw new IllegalArgumentException("Field in struct is not SDField " + f.getName());
}
}
} else {
throw new IllegalArgumentException("Could not find struct " + dataType.getName());
}
}
}
}
/** Queues an operation to be applied to this field after parsing completes. */
public void addOperation(FieldOperation op) {
pendingOperations.add(op);
}
/** Applies and drains all pending operations, in their natural (sorted) order. */
@Override
public void applyOperations(SDField field) {
if (pendingOperations.isEmpty()) return;
Collections.sort(pendingOperations);
ListIterator<FieldOperation> ops = pendingOperations.listIterator();
while (ops.hasNext()) {
FieldOperation op = ops.next();
// Remove before applying so the list never retains an already-applied operation.
ops.remove();
op.apply(field);
}
}
/** Applies all pending operations to this field itself. */
public void applyOperations() {
applyOperations(this);
}
/** Sets an explicit field id; suppresses automatic id calculation in setDataType(). */
public void setId(int fieldId, DocumentType owner) {
super.setId(fieldId, owner);
idOverride = true;
}
/** Unwraps collections/maps and returns the first struct type found, or null. */
public StructDataType getFirstStructRecursive() {
DataType dataType = getDataType();
while (true) {
if (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
} else if (dataType instanceof MapDataType) {
// Only the value side of maps is followed.
dataType = ((MapDataType)dataType).getValueType();
} else {
break;
}
}
return (dataType instanceof StructDataType) ? (StructDataType)dataType : null;
}
// Unwraps collection layers only; returns the result if it is a struct or map, else null.
private DataType getFirstStructOrMapRecursive() {
DataType dataType = getDataType();
while (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
return (dataType instanceof StructDataType || dataType instanceof MapDataType) ? dataType : null;
}
// True if this field (possibly through collections/maps) wraps a struct.
private boolean usesStruct() {
DataType dt = getFirstStructRecursive();
return (dt != null);
}
/** True if this field (possibly through collections) wraps a struct or map. */
@Override
public boolean usesStructOrMap() {
DataType dt = getFirstStructOrMapRecursive();
return (dt != null);
}
/** True if the first non-empty indexing script set on this field did attributing. */
@Override
public boolean wasConfiguredToDoAttributing() {
return wasConfiguredToDoAttributing;
}
/** Parse an indexing expression which will use the simple linguistics implementation, suitable for testing. */
public void parseIndexingScript(String script) {
parseIndexingScript(script, new SimpleLinguistics(), Embedder.throwsOnUse);
}
/**
 * Parses the given indexing script and installs it on this field.
 *
 * @param script the indexing script source to parse
 * @param linguistics the linguistics implementation to bind into the script
 * @param embedder the embedder to bind into the script
 * @throws RuntimeException wrapping the ParseException if the script cannot be parsed
 */
public void parseIndexingScript(String script, Linguistics linguistics, Embedder embedder) {
    try {
        ScriptParserContext config = new ScriptParserContext(linguistics, embedder);
        config.setInputStream(new IndexingInput(script));
        setIndexingScript(ScriptExpression.newInstance(config));
    } catch (ParseException e) {
        // Fixed typo in the error message: "parser" -> "parse".
        throw new RuntimeException("Failed to parse script '" + script + "'.", e);
    }
}
/** Sets the indexing script of this, or null to not use a script */
public void setIndexingScript(ScriptExpression exp) {
if (exp == null) {
exp = new ScriptExpression();
}
indexingScript = exp;
if (indexingScript.isEmpty()) {
return;
}
// Latch: remember if any non-empty script ever did attributing on this field.
if (!wasConfiguredToDoAttributing()) {
wasConfiguredToDoAttributing = doesAttributing();
}
// For non-struct fields, materialize an Attribute object for each attribute expression
// in the script that does not already have one.
if (!usesStructOrMap()) {
new ExpressionVisitor() {
@Override
protected void doVisit(Expression exp) {
if (!(exp instanceof AttributeExpression)) {
return;
}
String fieldName = ((AttributeExpression)exp).getFieldName();
if (fieldName == null) {
fieldName = getName();
}
Attribute attribute = attributes.get(fieldName);
if (attribute == null) {
addAttribute(new Attribute(fieldName, getDataType()));
}
}
}.visit(indexingScript);
}
// Propagate the same script to all struct sub-fields.
for (SDField structField : getStructFields()) {
structField.setIndexingScript(exp);
}
}
@Override
public ScriptExpression getIndexingScript() { return indexingScript; }
@SuppressWarnings("deprecation")
@Override
public void setDataType(DataType type) {
// URIs are always lowercased and never stemmed.
if (type.equals(DataType.URI)) {
normalizing.inferLowercase();
stemming = Stemming.NONE;
}
this.dataType = type;
// Recompute the field id unless an explicit id was set via setId().
if ( ! idOverride) {
this.fieldId = calculateIdV7(null);
}
}
@Override
public boolean isIndexStructureField() {
return indexStructureField;
}
/** Marks this field as contributing to index structure even without indexing code. */
public void setIndexStructureField(boolean indexStructureField) {
this.indexStructureField = indexStructureField;
}
/** True if this field has an indexing script which does indexing. */
@Override
public boolean hasIndex() {
return (getIndexingScript() != null) && doesIndexing();
}
/** Sets the literal boost of this field */
public void setLiteralBoost(int literalBoost) { this.literalBoost=literalBoost; }
/**
 * Returns the literal boost of this field. This boost is added to a literal score
 * when a query term matched as query term exactly (unnormalized and unstemmed).
 * Default is non-positive.
 */
@Override
public int getLiteralBoost() { return literalBoost; }
/** Sets the weight of this field */
public void setWeight(int weight) { this.weight=weight; }
/** Returns the weight of this field, or 0 if nothing is set */
@Override
public int getWeight() { return weight; }
/**
 * Returns what kind of matching type should be applied.
 */
@Override
public Matching getMatching() { return matching; }
/**
 * Sets what kind of matching type should be applied.
 * (Token matching is default, PREFIX, SUBSTRING, SUFFIX are alternatives)
 */
public void setMatching(Matching matching) { this.matching=matching; }
/**
 * Returns Dictionary settings, or null if none are set.
 */
public Dictionary getDictionary() { return dictionary; }
/** Returns the dictionary settings, creating them first if not yet present. */
public Dictionary getOrSetDictionary() {
if (dictionary == null) {
dictionary = new Dictionary();
}
return dictionary;
}
/**
 * Set the matching type for this field and all subfields (recursively).
 */
public void setMatchingType(Matching.Type type) {
this.getMatching().setType(type);
for (SDField structField : getStructFields()) {
structField.setMatchingType(type);
}
}
/**
 * Set the matching case for this field and all subfields (recursively).
 */
public void setMatchingCase(Case casing) {
this.getMatching().setCase(casing);
for (SDField structField : getStructFields()) {
structField.setMatchingCase(casing);
}
}
/**
 * Set matching algorithm for this field and all subfields.
 * Recurses like {@link #setMatchingType} and {@link #setMatchingCase}; the previous
 * implementation only set the algorithm one level down, leaving nested struct
 * sub-fields untouched — inconsistent with the other two matching setters.
 */
public void setMatchingAlgorithm(Matching.Algorithm algorithm) {
    this.getMatching().setAlgorithm(algorithm);
    for (SDField structField : getStructFields()) {
        structField.setMatchingAlgorithm(algorithm);
    }
}
/** Adds an explicit index defined in this field */
public void addIndex(Index index) {
indices.put(index.getName(),index);
}
/**
 * Returns an index, or null if no index with this name has had
 * some <b>explicit settings</b> applied in this field (even if this returns null,
 * the index may be implicitly defined by an indexing statement)
 */
@Override
public Index getIndex(String name) {
return indices.get(name);
}
/**
 * Returns an index if this field has one (implicitly or
 * explicitly) targeting the given name.
 */
@Override
public boolean existsIndex(String name) {
if (indices.get(name) != null) return true;
// A field doing indexing implicitly defines an index with its own name.
return name.equals(getName()) && doesIndexing();
}
/**
 * Defined indices on this field
 * @return defined indices on this
 */
@Override
public Map<String, Index> getIndices() {
return indices;
}
/**
 * Sets the default rank type of this fields indices, and sets this rank type
 * to all indices explicitly defined here which has no index set.
 * (This complex behavior is dues to the fact than we would prefer to have rank types
 * per field, not per index)
 */
public void setRankType(RankType rankType) {
this.rankType=rankType;
for (Index index : getIndices().values()) {
// Explicit per-index rank types win over the field default.
if (index.getRankType()==null)
index.setRankType(rankType);
}
}
/** Returns the rank settings set in a "rank" block for this field. This is never null. */
@Override
public Ranking getRanking() { return ranking; }
/** Returns the default rank type of indices of this field, or null if nothing is set */
@Override
public RankType getRankType() { return this.rankType; }
/**
* Returns the search-time attribute settings of this field or null if none is set.
*
* <p>TODO: Make unmodifiable.</p>
*/
@Override
public Map<String, Attribute> getAttributes() { return attributes; }
public Attribute getAttribute() {
return attributes.get(getName());
}
public void addAttribute(Attribute attribute) {
    // An attribute declared without a name defaults to this field's own name.
    String attributeName = attribute.getName();
    if (attributeName == null || attributeName.isEmpty()) {
        attributeName = getName();
        attribute.setName(attributeName);
    }
    attributes.put(attribute.getName(), attribute);
}
/**
* Returns the stemming setting of this field.
* Default is determined by the owning search definition.
*
* @return the stemming setting of this, or null, to use the default
*/
@Override
public Stemming getStemming() { return stemming; }
/**
* Whether this field should be stemmed in this search definition
*/
@Override
public Stemming getStemming(Search search) {
if (stemming != null)
return stemming;
else
return search.getStemming();
}
@Override
public Field asField() {
return this;
}
/**
* Sets how this field should be stemmed, or set to null to use the default.
*/
public void setStemming(Stemming stemming) {
this.stemming = stemming;
}
/** Returns an unmodifiable map of the summary fields defined in this */
@Override
public Map<String, SummaryField> getSummaryFields() {
return Collections.unmodifiableMap(summaryFields);
}
public void removeSummaryFields() {
summaryFields.clear();
}
/** Adds a summary field */
public void addSummaryField(SummaryField summaryField) {
summaryFields.put(summaryField.getName(),summaryField);
}
/**
* Returns a summary field defined (implicitly or explicitly) by this field.
* Returns null if there is no such summary field defined.
*/
@Override
public SummaryField getSummaryField(String name) {
return summaryFields.get(name);
}
/**
 * Returns a summary field defined (implicitly or explicitly) by this field.
 *
 * @param name   the name of the summary field to look up or create
 * @param create true to create the summary field and add it to this field
 *               before returning if it is missing
 * @return the summary field, or null if not present and create is false
 */
public SummaryField getSummaryField(String name, boolean create) {
    SummaryField summaryField = summaryFields.get(name);
    if (summaryField == null && create) {
        summaryField = new SummaryField(name, getDataType());
        addSummaryField(summaryField);
    }
    // Return the field directly instead of performing a redundant second
    // map lookup (addSummaryField stores it under the same name).
    return summaryField;
}
/** Returns list of static struct fields */
@Override
public Collection<SDField> getStructFields() { return structFields.values(); }
/**
* Returns a struct field defined in this field,
* potentially traversing into nested structs.
* Returns null if there is no such struct field defined.
*/
@Override
public SDField getStructField(String name) {
    // Dotted names address nested structs: resolve the first component here
    // and delegate the remainder of the path to that subfield.
    int dot = name.indexOf('.');
    if (dot >= 0) {
        SDField superField = structFields.get(name.substring(0, dot));
        if (superField == null) return null;
        return superField.getStructField(name.substring(dot + 1));
    }
    return structFields.get(name);
}
/**
* Returns how the content of this field should be accent normalized etc
*/
@Override
public NormalizeLevel getNormalizing() { return normalizing; }
/**
* Change how the content of this field should be accent normalized etc
*/
public void setNormalizing(NormalizeLevel level) { normalizing = level; }
public void addQueryCommand(String name) {
queryCommands.add(name);
}
public boolean hasQueryCommand(String name) {
return queryCommands.contains(name);
}
/** Returns a list of query commands */
@Override
public List<String> getQueryCommands() { return queryCommands; }
/** Returns the document that this field was declared in, or null */
private SDDocumentType getOwnerDocType() {
return ownerDocType;
}
@Override
public boolean equals(Object other) {
if ( ! (other instanceof SDField)) return false;
return super.equals(other);
}
@Override
public int hashCode() {
return getName().hashCode();
}
@Override
public String toString() {
return "field '" + getName() + "'";
}
/** The aliases declared for this field */
@Override
public Map<String, String> getAliasToName() {
return aliasToName;
}
/**
 * Presumably gates whether document processing may run this field's full
 * indexing chain: regular document fields always qualify, while extra
 * (derived) fields qualify only when backed by a mutable attribute of the
 * same name — TODO(review): confirm the exact docproc semantics with callers.
 */
@Override
public boolean hasFullIndexingDocprocRights() {
Attribute self = getAttributes().get(getName());
return (!isExtraField() || ((self != null) && self.isMutable()));
}
} |
Thanks, fixed. | public void populateWithStructFields(SDDocumentType sdoc, String name, DataType dataType, int recursion) {
// Nothing to populate unless this field (possibly wrapped in collections)
// resolves to a struct or map type.
DataType dt = getFirstStructOrMapRecursive();
if (dt == null) return;
if (dataType instanceof MapDataType) {
    // Maps get synthetic "key" and "value" subfields mirroring the map's
    // type parameters.
    MapDataType mdt = (MapDataType) dataType;
    SDField keyField = new SDField(sdoc, name.concat(".key"), mdt.getKeyType(),
                                   getOwnerDocType(), new Matching(), true, recursion + 1);
    structFields.put("key", keyField);
    SDField valueField = new SDField(sdoc, name.concat(".value"), mdt.getValueType(),
                                     getOwnerDocType(), new Matching(), true, recursion + 1);
    structFields.put("value", valueField);
} else {
    // Depth cap terminates recursion on (indirectly) self-referential structs.
    if (recursion >= 10) return;
    if (dataType instanceof CollectionDataType) {
        dataType = ((CollectionDataType) dataType).getNestedType();
    }
    if (dataType instanceof StructDataType) {
        SDDocumentType subType = sdoc != null ? sdoc.getType(dataType.getName()) : null;
        if (subType == null) {
            throw new IllegalArgumentException("Could not find struct '" + dataType.getName() + "'.");
        }
        for (Field field : subType.fieldSet()) {
            SDField subField = new SDField(sdoc, name.concat(".").concat(field.getName()), field.getDataType(),
                                           subType, new Matching(), true, recursion + 1);
            // putIfAbsent replaces the containsKey+put pair: an existing
            // subfield definition wins, with a single map lookup.
            structFields.putIfAbsent(field.getName(), subField);
        }
    }
}
} | if (!structFields.containsKey(field.getName())) { | public void populateWithStructFields(SDDocumentType sdoc, String name, DataType dataType, int recursion) {
// Nothing to populate unless this field (possibly wrapped in collections)
// resolves to a struct or map type.
DataType dt = getFirstStructOrMapRecursive();
if (dt == null) return;
if (dataType instanceof MapDataType) {
// Maps get synthetic "key" and "value" subfields mirroring the map's type parameters.
MapDataType mdt = (MapDataType) dataType;
SDField keyField = new SDField(sdoc, name.concat(".key"), mdt.getKeyType(),
getOwnerDocType(), new Matching(), true, recursion + 1);
structFields.put("key", keyField);
SDField valueField = new SDField(sdoc, name.concat(".value"), mdt.getValueType(),
getOwnerDocType(), new Matching(), true, recursion + 1);
structFields.put("value", valueField);
} else {
// Depth cap terminates recursion on (indirectly) self-referential struct types.
if (recursion >= 10) return;
if (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
if (dataType instanceof StructDataType) {
SDDocumentType subType = sdoc != null ? sdoc.getType(dataType.getName()) : null;
if (subType == null) {
throw new IllegalArgumentException("Could not find struct '" + dataType.getName() + "'.");
}
// One subfield per struct member, named "<thisField>.<member>"; putIfAbsent
// keeps any existing subfield definition (first definition wins).
for (Field field : subType.fieldSet()) {
SDField subField = new SDField(sdoc, name.concat(".").concat(field.getName()), field.getDataType(),
subType, new Matching(), true, recursion + 1);
structFields.putIfAbsent(field.getName(), subField);
}
}
}
} | class SDField extends Field implements TypedKey, FieldOperationContainer, ImmutableSDField {
/** Use this field for modifying index-structure, even if it doesn't have any indexing code */
private boolean indexStructureField = false;
/** The indexing statements to be applied to this value during indexing */
private ScriptExpression indexingScript = new ScriptExpression();
/** The default rank type for indices of this field */
private RankType rankType = RankType.DEFAULT;
/** Rank settings in a "rank" block for the field. */
private final Ranking ranking = new Ranking();
/**
* The literal boost of this field. This boost is added to a rank score
* when a query term matched as query term exactly (unnormalized and unstemmed).
* Non-positive boosts causes no boosting, 0 allows boosts
* to be specified in other rank profiles, while negative values
* turns the capability off.
*/
private int literalBoost = -1;
/**
* The weight of this field. This is a percentage,
* so 100 is default to provide the identity transform.
*/
private int weight = 100;
/**
* Indicates what kind of matching should be done on this field
*/
private Matching matching = new Matching();
private Dictionary dictionary = null;
/** Attribute settings, or null if there are none */
private final Map<String, Attribute> attributes = new TreeMap<>();
/**
* The stemming setting of this field, or null to use the default.
* Default is determined by the owning search definition.
*/
private Stemming stemming = null;
/** How content of this field should be accent normalized etc. */
private NormalizeLevel normalizing = new NormalizeLevel();
/** Extra query commands of this field */
private final List<String> queryCommands = new java.util.ArrayList<>(0);
/** Summary fields defined in this field */
private final Map<String, SummaryField> summaryFields = new java.util.LinkedHashMap<>(0);
/** The explicitly index settings on this field */
private final Map<String, Index> indices = new java.util.LinkedHashMap<>();
private boolean idOverride = false;
/** Struct fields defined in this field */
private final Map<String,SDField> structFields = new java.util.LinkedHashMap<>(0);
/** The document that this field was declared in, or null*/
private SDDocumentType ownerDocType = null;
/** The aliases declared for this field. May pertain to indexes or attributes */
private final Map<String, String> aliasToName = new HashMap<>();
/** Pending operations that must be applied after parsing, due to use of not-yet-defined structs. */
private final List<FieldOperation> pendingOperations = new LinkedList<>();
private boolean isExtraField = false;
private boolean wasConfiguredToDoAttributing = false;
/**
* Creates a new field. This method is only used to create reserved fields.
*
* @param name the name of the field
* @param dataType the datatype of the field
*/
protected SDField(SDDocumentType repo, String name, int id, DataType dataType, boolean populate) {
super(name, id, dataType);
populate(populate, repo, name, dataType);
}
public SDField(SDDocumentType repo, String name, int id, DataType dataType) {
this(repo, name, id, dataType, true);
}
/** Creates a new field */
public SDField(SDDocumentType repo, String name, DataType dataType, boolean populate) {
super(name, dataType);
populate(populate, repo, name, dataType);
}
/** Creates a new field */
protected SDField(SDDocumentType repo, String name, DataType dataType, SDDocumentType owner, boolean populate) {
super(name, dataType, owner == null ? null : owner.getDocumentType());
this.ownerDocType = owner;
populate(populate, repo, name, dataType);
}
/**
* Creates a new field
*
* @param name the name of the field
* @param dataType the datatype of the field
* @param owner the owning document (used to check for id collisions)
* @param fieldMatching the matching object to set for the field
*/
protected SDField(SDDocumentType repo, String name, DataType dataType, SDDocumentType owner,
Matching fieldMatching, boolean populate, int recursion) {
super(name, dataType, owner == null ? null : owner.getDocumentType());
this.ownerDocType = owner;
if (fieldMatching != null)
this.setMatching(fieldMatching);
populate(populate, repo, name, dataType, fieldMatching, recursion);
}
public SDField(SDDocumentType repo, String name, DataType dataType) {
this(repo, name,dataType, true);
}
public SDField(String name, DataType dataType) {
this(null, name,dataType);
}
private void populate(boolean populate, SDDocumentType repo, String name, DataType dataType) {
populate(populate, repo, name, dataType, null, 0);
}
private void populate(boolean populate, SDDocumentType repo, String name, DataType dataType, Matching fieldMatching, int recursion) {
if (dataType instanceof TensorDataType) {
TensorType type = ((TensorDataType)dataType).getTensorType();
if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
throw new IllegalArgumentException("Illegal type in field " + name + " type " + type +
": Dense tensor dimensions must have a size");
addQueryCommand("type " + type);
}
else {
addQueryCommand("type " + dataType.getName());
}
if (populate || (dataType instanceof MapDataType)) {
populateWithStructFields(repo, name, dataType, recursion);
populateWithStructMatching(repo, name, dataType, fieldMatching);
}
}
public void setIsExtraField(boolean isExtra) {
isExtraField = isExtra;
}
@Override
public boolean isExtraField() {
return isExtraField;
}
@Override
public boolean isImportedField() {
return false;
}
@Override
public boolean doesAttributing() {
return containsExpression(AttributeExpression.class);
}
@Override
public boolean doesIndexing() {
return containsExpression(IndexExpression.class);
}
public boolean doesSummarying() {
if (usesStruct()) {
for (SDField structField : getStructFields()) {
if (structField.doesSummarying()) {
return true;
}
}
}
return containsExpression(SummaryExpression.class);
}
@Override
public boolean doesLowerCasing() {
return containsExpression(LowerCaseExpression.class);
}
@Override
public <T extends Expression> boolean containsExpression(Class<T> searchFor) {
return findExpression(searchFor) != null;
}
private <T extends Expression> T findExpression(Class<T> searchFor) {
return new ExpressionSearcher<>(searchFor).searchIn(indexingScript);
}
public void addSummaryFieldSources(SummaryField summaryField) {
if (usesStruct()) {
/*
* How this works for structs: When at least one sub-field in a struct is to
* be used for summary, that whole struct field is included in summary.cfg. Then,
* vsmsummary.cfg specifies the sub-fields used for each struct field.
* So we recurse into each struct, adding the destination classes set for each sub-field
* to the main summary-field for the struct field.
*/
for (SDField structField : getStructFields()) {
for (SummaryField sumF : structField.getSummaryFields().values()) {
for (String dest : sumF.getDestinations()) {
summaryField.addDestination(dest);
}
}
structField.addSummaryFieldSources(summaryField);
}
} else {
if (doesSummarying()) {
summaryField.addSource(getName());
}
}
}
public void populateWithStructMatching(SDDocumentType sdoc, String name, DataType dataType,
Matching superFieldMatching) {
DataType dt = getFirstStructOrMapRecursive();
if (dt == null) return;
if (dataType instanceof MapDataType) {
MapDataType mdt = (MapDataType) dataType;
Matching keyFieldMatching = new Matching();
if (superFieldMatching != null) {
keyFieldMatching.merge(superFieldMatching);
}
SDField keyField = structFields.get(name.concat(".key"));
if (keyField != null) {
keyField.populateWithStructMatching(sdoc, name.concat(".key"), mdt.getKeyType(), keyFieldMatching);
keyField.setMatching(keyFieldMatching);
}
Matching valueFieldMatching = new Matching();
if (superFieldMatching != null) {
valueFieldMatching.merge(superFieldMatching);
}
SDField valueField = structFields.get(name.concat(".value"));
if (valueField != null) {
valueField.populateWithStructMatching(sdoc, name.concat(".value"), mdt.getValueType(),
valueFieldMatching);
valueField.setMatching(valueFieldMatching);
}
} else {
if (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
if (dataType instanceof StructDataType) {
SDDocumentType subType = sdoc != null ? sdoc.getType(dataType.getName()) : null;
if (subType != null) {
for (Field f : subType.fieldSet()) {
if (f instanceof SDField) {
SDField field = (SDField) f;
Matching subFieldMatching = new Matching();
if (superFieldMatching != null) {
subFieldMatching.merge(superFieldMatching);
}
subFieldMatching.merge(field.getMatching());
SDField subField = structFields.get(field.getName());
if (subField != null) {
subField.populateWithStructMatching(sdoc, name.concat(".").concat(field.getName()), field.getDataType(),
subFieldMatching);
subField.setMatching(subFieldMatching);
}
} else {
throw new IllegalArgumentException("Field in struct is not SDField " + f.getName());
}
}
} else {
throw new IllegalArgumentException("Could not find struct " + dataType.getName());
}
}
}
}
public void addOperation(FieldOperation op) {
pendingOperations.add(op);
}
@Override
public void applyOperations(SDField field) {
if (pendingOperations.isEmpty()) return;
Collections.sort(pendingOperations);
ListIterator<FieldOperation> ops = pendingOperations.listIterator();
while (ops.hasNext()) {
FieldOperation op = ops.next();
ops.remove();
op.apply(field);
}
}
public void applyOperations() {
applyOperations(this);
}
public void setId(int fieldId, DocumentType owner) {
super.setId(fieldId, owner);
idOverride = true;
}
/**
 * Unwraps collection element types and map value types until a
 * non-container type is reached; returns that type if it is a struct,
 * otherwise null.
 */
public StructDataType getFirstStructRecursive() {
    DataType current = getDataType();
    boolean unwrapped = true;
    while (unwrapped) {
        if (current instanceof CollectionDataType) {
            current = ((CollectionDataType) current).getNestedType();
        } else if (current instanceof MapDataType) {
            current = ((MapDataType) current).getValueType();
        } else {
            unwrapped = false;
        }
    }
    if (current instanceof StructDataType) {
        return (StructDataType) current;
    }
    return null;
}
private DataType getFirstStructOrMapRecursive() {
DataType dataType = getDataType();
while (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
return (dataType instanceof StructDataType || dataType instanceof MapDataType) ? dataType : null;
}
private boolean usesStruct() {
DataType dt = getFirstStructRecursive();
return (dt != null);
}
@Override
public boolean usesStructOrMap() {
DataType dt = getFirstStructOrMapRecursive();
return (dt != null);
}
@Override
public boolean wasConfiguredToDoAttributing() {
return wasConfiguredToDoAttributing;
}
/** Parse an indexing expression which will use the simple linguistics implementatino suitable for testing */
public void parseIndexingScript(String script) {
parseIndexingScript(script, new SimpleLinguistics(), Embedder.throwsOnUse);
}
/**
 * Parses the given indexing script source and installs it on this field.
 *
 * @param script      the indexing script to parse
 * @param linguistics the linguistics implementation made available to the parser
 * @param embedder    the embedder made available to the parser
 * @throws RuntimeException wrapping the ParseException if the script is invalid
 */
public void parseIndexingScript(String script, Linguistics linguistics, Embedder embedder) {
    try {
        ScriptParserContext config = new ScriptParserContext(linguistics, embedder);
        config.setInputStream(new IndexingInput(script));
        setIndexingScript(ScriptExpression.newInstance(config));
    } catch (ParseException e) {
        // Message typo fixed ("parser" -> "parse"); the cause is preserved.
        throw new RuntimeException("Failed to parse script '" + script + "'.", e);
    }
}
/** Sets the indexing script of this, or null to not use a script */
public void setIndexingScript(ScriptExpression exp) {
// Null means "no script": normalize to an empty script so the field never holds null.
if (exp == null) {
exp = new ScriptExpression();
}
indexingScript = exp;
if (indexingScript.isEmpty()) {
return;
}
// Latch whether this field was configured to attribute before any later rewrites.
if (!wasConfiguredToDoAttributing()) {
wasConfiguredToDoAttributing = doesAttributing();
}
if (!usesStructOrMap()) {
// For plain (non-struct/map) fields, ensure an Attribute object exists for
// every attribute expression referenced by the script.
// NOTE(review): the visitor parameter 'exp' shadows the method parameter.
new ExpressionVisitor() {
@Override
protected void doVisit(Expression exp) {
if (!(exp instanceof AttributeExpression)) {
return;
}
// An attribute expression without an explicit field name targets this field.
String fieldName = ((AttributeExpression)exp).getFieldName();
if (fieldName == null) {
fieldName = getName();
}
Attribute attribute = attributes.get(fieldName);
if (attribute == null) {
addAttribute(new Attribute(fieldName, getDataType()));
}
}
}.visit(indexingScript);
}
// Propagate the same script to all struct subfields.
for (SDField structField : getStructFields()) {
structField.setIndexingScript(exp);
}
}
@Override
public ScriptExpression getIndexingScript() { return indexingScript; }
@SuppressWarnings("deprecation")
@Override
public void setDataType(DataType type) {
if (type.equals(DataType.URI)) {
normalizing.inferLowercase();
stemming = Stemming.NONE;
}
this.dataType = type;
if ( ! idOverride) {
this.fieldId = calculateIdV7(null);
}
}
@Override
public boolean isIndexStructureField() {
return indexStructureField;
}
public void setIndexStructureField(boolean indexStructureField) {
this.indexStructureField = indexStructureField;
}
@Override
public boolean hasIndex() {
return (getIndexingScript() != null) && doesIndexing();
}
/** Sets the literal boost of this field */
public void setLiteralBoost(int literalBoost) { this.literalBoost=literalBoost; }
/**
* Returns the literal boost of this field. This boost is added to a literal score
* when a query term matched as query term exactly (unnormalized and unstemmed).
* Default is non-positive.
*/
@Override
public int getLiteralBoost() { return literalBoost; }
/** Sets the weight of this field */
public void setWeight(int weight) { this.weight=weight; }
/** Returns the weight of this field, or 0 if nothing is set */
@Override
public int getWeight() { return weight; }
/**
* Returns what kind of matching type should be applied.
*/
@Override
public Matching getMatching() { return matching; }
/**
* Sets what kind of matching type should be applied.
* (Token matching is default, PREFIX, SUBSTRING, SUFFIX are alternatives)
*/
public void setMatching(Matching matching) { this.matching=matching; }
/**
* Returns Dictionary settings.
*/
public Dictionary getDictionary() { return dictionary; }
public Dictionary getOrSetDictionary() {
if (dictionary == null) {
dictionary = new Dictionary();
}
return dictionary;
}
/**
* Set the matching type for this field and all subfields.
*/
public void setMatchingType(Matching.Type type) {
this.getMatching().setType(type);
for (SDField structField : getStructFields()) {
structField.setMatchingType(type);
}
}
/**
* Set the matching type for this field and all subfields.
*/
public void setMatchingCase(Case casing) {
this.getMatching().setCase(casing);
for (SDField structField : getStructFields()) {
structField.setMatchingCase(casing);
}
}
/**
* Set matching algorithm for this field and all subfields.
*/
public void setMatchingAlgorithm(Matching.Algorithm algorithm) {
this.getMatching().setAlgorithm(algorithm);
for (SDField structField : getStructFields()) {
structField.getMatching().setAlgorithm(algorithm);
}
}
/** Adds an explicit index defined in this field */
public void addIndex(Index index) {
indices.put(index.getName(),index);
}
/**
* Returns an index, or null if no index with this name has had
* some <b>explicit settings</b> applied in this field (even if this returns null,
* the index may be implicitly defined by an indexing statement)
*/
@Override
public Index getIndex(String name) {
return indices.get(name);
}
/**
* Returns an index if this field has one (implicitly or
* explicitly) targeting the given name.
*/
@Override
public boolean existsIndex(String name) {
if (indices.get(name) != null) return true;
return name.equals(getName()) && doesIndexing();
}
/**
* Defined indices on this field
* @return defined indices on this
*/
@Override
public Map<String, Index> getIndices() {
return indices;
}
/**
* Sets the default rank type of this fields indices, and sets this rank type
* to all indices explicitly defined here which has no index set.
* (This complex behavior is dues to the fact than we would prefer to have rank types
* per field, not per index)
*/
public void setRankType(RankType rankType) {
this.rankType=rankType;
for (Index index : getIndices().values()) {
if (index.getRankType()==null)
index.setRankType(rankType);
}
}
/** Returns the rank settings set in a "rank" block for this field. This is never null. */
@Override
public Ranking getRanking() { return ranking; }
/** Returns the default rank type of indices of this field, or null if nothing is set */
@Override
public RankType getRankType() { return this.rankType; }
/**
* Returns the search-time attribute settings of this field or null if none is set.
*
* <p>TODO: Make unmodifiable.</p>
*/
@Override
public Map<String, Attribute> getAttributes() { return attributes; }
public Attribute getAttribute() {
return attributes.get(getName());
}
public void addAttribute(Attribute attribute) {
String name = attribute.getName();
if (name == null || "".equals(name)) {
name = getName();
attribute.setName(name);
}
attributes.put(attribute.getName(),attribute);
}
/**
* Returns the stemming setting of this field.
* Default is determined by the owning search definition.
*
* @return the stemming setting of this, or null, to use the default
*/
@Override
public Stemming getStemming() { return stemming; }
/**
* Whether this field should be stemmed in this search definition
*/
@Override
public Stemming getStemming(Search search) {
if (stemming != null)
return stemming;
else
return search.getStemming();
}
@Override
public Field asField() {
return this;
}
/**
* Sets how this field should be stemmed, or set to null to use the default.
*/
public void setStemming(Stemming stemming) {
this.stemming = stemming;
}
/** Returns an unmodifiable map of the summary fields defined in this */
@Override
public Map<String, SummaryField> getSummaryFields() {
return Collections.unmodifiableMap(summaryFields);
}
public void removeSummaryFields() {
summaryFields.clear();
}
/** Adds a summary field */
public void addSummaryField(SummaryField summaryField) {
summaryFields.put(summaryField.getName(),summaryField);
}
/**
* Returns a summary field defined (implicitly or explicitly) by this field.
* Returns null if there is no such summary field defined.
*/
@Override
public SummaryField getSummaryField(String name) {
return summaryFields.get(name);
}
/**
* Returns a summary field defined (implicitly or explicitly) by this field.
*
* @param create true to create the summary field and add it to this field before returning if it is missing
* @return the summary field, or null if not present and create is false
*/
public SummaryField getSummaryField(String name, boolean create) {
    SummaryField summaryField = summaryFields.get(name);
    if (summaryField == null && create) {
        summaryField = new SummaryField(name, getDataType());
        addSummaryField(summaryField);
    }
    // Return the field directly instead of performing a redundant second
    // map lookup (addSummaryField stores it under the same name).
    return summaryField;
}
/** Returns list of static struct fields */
@Override
public Collection<SDField> getStructFields() { return structFields.values(); }
/**
* Returns a struct field defined in this field,
* potentially traversing into nested structs.
* Returns null if there is no such struct field defined.
*/
@Override
public SDField getStructField(String name) {
if (name.contains(".")) {
String superFieldName = name.substring(0,name.indexOf("."));
String subFieldName = name.substring(name.indexOf(".")+1);
SDField superField = structFields.get(superFieldName);
if (superField != null) {
return superField.getStructField(subFieldName);
}
return null;
}
return structFields.get(name);
}
/**
* Returns how the content of this field should be accent normalized etc
*/
@Override
public NormalizeLevel getNormalizing() { return normalizing; }
/**
* Change how the content of this field should be accent normalized etc
*/
public void setNormalizing(NormalizeLevel level) { normalizing = level; }
public void addQueryCommand(String name) {
queryCommands.add(name);
}
public boolean hasQueryCommand(String name) {
return queryCommands.contains(name);
}
/** Returns a list of query commands */
@Override
public List<String> getQueryCommands() { return queryCommands; }
/** Returns the document that this field was declared in, or null */
private SDDocumentType getOwnerDocType() {
return ownerDocType;
}
@Override
public boolean equals(Object other) {
if ( ! (other instanceof SDField)) return false;
return super.equals(other);
}
@Override
public int hashCode() {
return getName().hashCode();
}
@Override
public String toString() {
return "field '" + getName() + "'";
}
/** The aliases declared for this field */
@Override
public Map<String, String> getAliasToName() {
return aliasToName;
}
@Override
public boolean hasFullIndexingDocprocRights() {
Attribute self = getAttributes().get(getName());
return (!isExtraField() || ((self != null) && self.isMutable()));
}
} | class SDField extends Field implements TypedKey, FieldOperationContainer, ImmutableSDField {
/** Use this field for modifying index-structure, even if it doesn't have any indexing code */
private boolean indexStructureField = false;
/** The indexing statements to be applied to this value during indexing */
private ScriptExpression indexingScript = new ScriptExpression();
/** The default rank type for indices of this field */
private RankType rankType = RankType.DEFAULT;
/** Rank settings in a "rank" block for the field. */
private final Ranking ranking = new Ranking();
/**
* The literal boost of this field. This boost is added to a rank score
* when a query term matched as query term exactly (unnormalized and unstemmed).
* Non-positive boosts causes no boosting, 0 allows boosts
* to be specified in other rank profiles, while negative values
* turns the capability off.
*/
private int literalBoost = -1;
/**
* The weight of this field. This is a percentage,
* so 100 is default to provide the identity transform.
*/
private int weight = 100;
/**
* Indicates what kind of matching should be done on this field
*/
private Matching matching = new Matching();
private Dictionary dictionary = null;
/** Attribute settings, or null if there are none */
private final Map<String, Attribute> attributes = new TreeMap<>();
/**
* The stemming setting of this field, or null to use the default.
* Default is determined by the owning search definition.
*/
private Stemming stemming = null;
/** How content of this field should be accent normalized etc. */
private NormalizeLevel normalizing = new NormalizeLevel();
/** Extra query commands of this field */
private final List<String> queryCommands = new java.util.ArrayList<>(0);
/** Summary fields defined in this field */
private final Map<String, SummaryField> summaryFields = new java.util.LinkedHashMap<>(0);
/** The explicitly index settings on this field */
private final Map<String, Index> indices = new java.util.LinkedHashMap<>();
private boolean idOverride = false;
/** Struct fields defined in this field */
private final Map<String,SDField> structFields = new java.util.LinkedHashMap<>(0);
/** The document that this field was declared in, or null*/
private SDDocumentType ownerDocType = null;
/** The aliases declared for this field. May pertain to indexes or attributes */
private final Map<String, String> aliasToName = new HashMap<>();
/** Pending operations that must be applied after parsing, due to use of not-yet-defined structs. */
private final List<FieldOperation> pendingOperations = new LinkedList<>();
private boolean isExtraField = false;
private boolean wasConfiguredToDoAttributing = false;
/**
* Creates a new field. This method is only used to create reserved fields.
*
* @param name the name of the field
* @param dataType the datatype of the field
*/
protected SDField(SDDocumentType repo, String name, int id, DataType dataType, boolean populate) {
super(name, id, dataType);
populate(populate, repo, name, dataType);
}
public SDField(SDDocumentType repo, String name, int id, DataType dataType) {
this(repo, name, id, dataType, true);
}
/** Creates a new field */
public SDField(SDDocumentType repo, String name, DataType dataType, boolean populate) {
super(name, dataType);
populate(populate, repo, name, dataType);
}
/** Creates a new field */
protected SDField(SDDocumentType repo, String name, DataType dataType, SDDocumentType owner, boolean populate) {
super(name, dataType, owner == null ? null : owner.getDocumentType());
this.ownerDocType = owner;
populate(populate, repo, name, dataType);
}
/**
* Creates a new field
*
* @param name the name of the field
* @param dataType the datatype of the field
* @param owner the owning document (used to check for id collisions)
* @param fieldMatching the matching object to set for the field
*/
protected SDField(SDDocumentType repo, String name, DataType dataType, SDDocumentType owner,
Matching fieldMatching, boolean populate, int recursion) {
super(name, dataType, owner == null ? null : owner.getDocumentType());
this.ownerDocType = owner;
if (fieldMatching != null)
this.setMatching(fieldMatching);
populate(populate, repo, name, dataType, fieldMatching, recursion);
}
public SDField(SDDocumentType repo, String name, DataType dataType) {
this(repo, name,dataType, true);
}
public SDField(String name, DataType dataType) {
this(null, name,dataType);
}
private void populate(boolean populate, SDDocumentType repo, String name, DataType dataType) {
populate(populate, repo, name, dataType, null, 0);
}
private void populate(boolean populate, SDDocumentType repo, String name, DataType dataType, Matching fieldMatching, int recursion) {
if (dataType instanceof TensorDataType) {
TensorType type = ((TensorDataType)dataType).getTensorType();
if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
throw new IllegalArgumentException("Illegal type in field " + name + " type " + type +
": Dense tensor dimensions must have a size");
addQueryCommand("type " + type);
}
else {
addQueryCommand("type " + dataType.getName());
}
if (populate || (dataType instanceof MapDataType)) {
populateWithStructFields(repo, name, dataType, recursion);
populateWithStructMatching(repo, name, dataType, fieldMatching);
}
}
public void setIsExtraField(boolean isExtra) {
isExtraField = isExtra;
}
@Override
public boolean isExtraField() {
return isExtraField;
}
@Override
public boolean isImportedField() {
return false;
}
@Override
public boolean doesAttributing() {
return containsExpression(AttributeExpression.class);
}
@Override
public boolean doesIndexing() {
return containsExpression(IndexExpression.class);
}
public boolean doesSummarying() {
if (usesStruct()) {
for (SDField structField : getStructFields()) {
if (structField.doesSummarying()) {
return true;
}
}
}
return containsExpression(SummaryExpression.class);
}
@Override
public boolean doesLowerCasing() {
return containsExpression(LowerCaseExpression.class);
}
@Override
public <T extends Expression> boolean containsExpression(Class<T> searchFor) {
return findExpression(searchFor) != null;
}
private <T extends Expression> T findExpression(Class<T> searchFor) {
return new ExpressionSearcher<>(searchFor).searchIn(indexingScript);
}
    /**
     * Adds this field (or, for structs, its summarying subfields) as sources of the given summary field.
     *
     * @param summaryField the summary field to receive sources and destinations
     */
    public void addSummaryFieldSources(SummaryField summaryField) {
        if (usesStruct()) {
            /*
             * How this works for structs: When at least one sub-field in a struct is to
             * be used for summary, that whole struct field is included in summary.cfg. Then,
             * vsmsummary.cfg specifies the sub-fields used for each struct field.
             * So we recurse into each struct, adding the destination classes set for each sub-field
             * to the main summary-field for the struct field.
             */
            for (SDField structField : getStructFields()) {
                for (SummaryField sumF : structField.getSummaryFields().values()) {
                    for (String dest : sumF.getDestinations()) {
                        summaryField.addDestination(dest);
                    }
                }
                structField.addSummaryFieldSources(summaryField);
            }
        } else {
            // Plain field: it is its own (only) source, if it summaries at all
            if (doesSummarying()) {
                summaryField.addSource(getName());
            }
        }
    }
    /**
     * Recursively propagates matching settings down into the struct (or map key/value) subfields of this field.
     * A subfield's effective matching is the merge of the matching inherited from the enclosing field and the
     * matching declared on the subfield itself.
     *
     * @param sdoc the document type used to look up struct subtypes, may be null
     * @param name the (dotted) name of the field currently being processed
     * @param dataType the declared type of that field
     * @param superFieldMatching the matching inherited from the enclosing field, or null
     * @throws IllegalArgumentException if a struct subtype cannot be resolved or contains a non-SDField
     */
    public void populateWithStructMatching(SDDocumentType sdoc, String name, DataType dataType,
                                           Matching superFieldMatching) {
        DataType dt = getFirstStructOrMapRecursive();
        if (dt == null) return; // nothing struct-like anywhere below: no subfields to match
        if (dataType instanceof MapDataType) {
            MapDataType mdt = (MapDataType) dataType;
            // Maps expose implicit ".key" and ".value" subfields; each inherits the super matching
            Matching keyFieldMatching = new Matching();
            if (superFieldMatching != null) {
                keyFieldMatching.merge(superFieldMatching);
            }
            SDField keyField = structFields.get(name.concat(".key"));
            if (keyField != null) {
                keyField.populateWithStructMatching(sdoc, name.concat(".key"), mdt.getKeyType(), keyFieldMatching);
                keyField.setMatching(keyFieldMatching);
            }
            Matching valueFieldMatching = new Matching();
            if (superFieldMatching != null) {
                valueFieldMatching.merge(superFieldMatching);
            }
            SDField valueField = structFields.get(name.concat(".value"));
            if (valueField != null) {
                valueField.populateWithStructMatching(sdoc, name.concat(".value"), mdt.getValueType(),
                                                      valueFieldMatching);
                valueField.setMatching(valueFieldMatching);
            }
        } else {
            // Unwrap one collection level, then handle a struct type by visiting its declared fields
            if (dataType instanceof CollectionDataType) {
                dataType = ((CollectionDataType)dataType).getNestedType();
            }
            if (dataType instanceof StructDataType) {
                SDDocumentType subType = sdoc != null ? sdoc.getType(dataType.getName()) : null;
                if (subType != null) {
                    for (Field f : subType.fieldSet()) {
                        if (f instanceof SDField) {
                            SDField field = (SDField) f;
                            // Effective matching = inherited matching merged with the subfield's own
                            Matching subFieldMatching = new Matching();
                            if (superFieldMatching != null) {
                                subFieldMatching.merge(superFieldMatching);
                            }
                            subFieldMatching.merge(field.getMatching());
                            SDField subField = structFields.get(field.getName());
                            if (subField != null) {
                                subField.populateWithStructMatching(sdoc, name.concat(".").concat(field.getName()), field.getDataType(),
                                                                    subFieldMatching);
                                subField.setMatching(subFieldMatching);
                            }
                        } else {
                            throw new IllegalArgumentException("Field in struct is not SDField " + f.getName());
                        }
                    }
                } else {
                    throw new IllegalArgumentException("Could not find struct " + dataType.getName());
                }
            }
        }
    }
public void addOperation(FieldOperation op) {
pendingOperations.add(op);
}
@Override
public void applyOperations(SDField field) {
if (pendingOperations.isEmpty()) return;
Collections.sort(pendingOperations);
ListIterator<FieldOperation> ops = pendingOperations.listIterator();
while (ops.hasNext()) {
FieldOperation op = ops.next();
ops.remove();
op.apply(field);
}
}
public void applyOperations() {
applyOperations(this);
}
public void setId(int fieldId, DocumentType owner) {
super.setId(fieldId, owner);
idOverride = true;
}
public StructDataType getFirstStructRecursive() {
DataType dataType = getDataType();
while (true) {
if (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
} else if (dataType instanceof MapDataType) {
dataType = ((MapDataType)dataType).getValueType();
} else {
break;
}
}
return (dataType instanceof StructDataType) ? (StructDataType)dataType : null;
}
private DataType getFirstStructOrMapRecursive() {
DataType dataType = getDataType();
while (dataType instanceof CollectionDataType) {
dataType = ((CollectionDataType)dataType).getNestedType();
}
return (dataType instanceof StructDataType || dataType instanceof MapDataType) ? dataType : null;
}
private boolean usesStruct() {
DataType dt = getFirstStructRecursive();
return (dt != null);
}
@Override
public boolean usesStructOrMap() {
DataType dt = getFirstStructOrMapRecursive();
return (dt != null);
}
@Override
public boolean wasConfiguredToDoAttributing() {
return wasConfiguredToDoAttributing;
}
    /** Parses an indexing expression using the simple linguistics implementation, suitable for testing. */
    public void parseIndexingScript(String script) {
        parseIndexingScript(script, new SimpleLinguistics(), Embedder.throwsOnUse);
    }
public void parseIndexingScript(String script, Linguistics linguistics, Embedder embedder) {
try {
ScriptParserContext config = new ScriptParserContext(linguistics, embedder);
config.setInputStream(new IndexingInput(script));
setIndexingScript(ScriptExpression.newInstance(config));
} catch (ParseException e) {
throw new RuntimeException("Failed to parser script '" + script + "'.", e);
}
}
    /** Sets the indexing script of this, or null to not use a script */
    public void setIndexingScript(ScriptExpression exp) {
        if (exp == null) {
            exp = new ScriptExpression(); // normalize null to an empty script
        }
        indexingScript = exp;
        if (indexingScript.isEmpty()) {
            return;
        }
        // Remember whether attributing was ever configured, even if a later script drops it
        if (!wasConfiguredToDoAttributing()) {
            wasConfiguredToDoAttributing = doesAttributing();
        }
        if (!usesStructOrMap()) {
            // For plain fields: ensure every attribute referenced by the script exists on this field
            new ExpressionVisitor() {
                @Override
                protected void doVisit(Expression exp) {
                    if (!(exp instanceof AttributeExpression)) {
                        return;
                    }
                    // An attribute expression without an explicit field name targets this field
                    String fieldName = ((AttributeExpression)exp).getFieldName();
                    if (fieldName == null) {
                        fieldName = getName();
                    }
                    Attribute attribute = attributes.get(fieldName);
                    if (attribute == null) {
                        addAttribute(new Attribute(fieldName, getDataType()));
                    }
                }
            }.visit(indexingScript);
        }
        // The same script is propagated to all struct subfields
        for (SDField structField : getStructFields()) {
            structField.setIndexingScript(exp);
        }
    }
@Override
public ScriptExpression getIndexingScript() { return indexingScript; }
    /**
     * Sets the data type of this field. URI fields additionally get lowercase normalizing inferred
     * and stemming disabled. Unless an id was explicitly set, the field id is recomputed from the new type.
     */
    @SuppressWarnings("deprecation")
    @Override
    public void setDataType(DataType type) {
        if (type.equals(DataType.URI)) {
            normalizing.inferLowercase();
            stemming = Stemming.NONE;
        }
        this.dataType = type;
        if ( ! idOverride) {
            this.fieldId = calculateIdV7(null);
        }
    }
@Override
public boolean isIndexStructureField() {
return indexStructureField;
}
public void setIndexStructureField(boolean indexStructureField) {
this.indexStructureField = indexStructureField;
}
@Override
public boolean hasIndex() {
return (getIndexingScript() != null) && doesIndexing();
}
/** Sets the literal boost of this field */
public void setLiteralBoost(int literalBoost) { this.literalBoost=literalBoost; }
/**
* Returns the literal boost of this field. This boost is added to a literal score
* when a query term matched as query term exactly (unnormalized and unstemmed).
* Default is non-positive.
*/
@Override
public int getLiteralBoost() { return literalBoost; }
/** Sets the weight of this field */
public void setWeight(int weight) { this.weight=weight; }
/** Returns the weight of this field, or 0 if nothing is set */
@Override
public int getWeight() { return weight; }
/**
* Returns what kind of matching type should be applied.
*/
@Override
public Matching getMatching() { return matching; }
/**
* Sets what kind of matching type should be applied.
* (Token matching is default, PREFIX, SUBSTRING, SUFFIX are alternatives)
*/
public void setMatching(Matching matching) { this.matching=matching; }
/**
* Returns Dictionary settings.
*/
public Dictionary getDictionary() { return dictionary; }
public Dictionary getOrSetDictionary() {
if (dictionary == null) {
dictionary = new Dictionary();
}
return dictionary;
}
/**
* Set the matching type for this field and all subfields.
*/
public void setMatchingType(Matching.Type type) {
this.getMatching().setType(type);
for (SDField structField : getStructFields()) {
structField.setMatchingType(type);
}
}
/**
* Set the matching type for this field and all subfields.
*/
public void setMatchingCase(Case casing) {
this.getMatching().setCase(casing);
for (SDField structField : getStructFields()) {
structField.setMatchingCase(casing);
}
}
/**
* Set matching algorithm for this field and all subfields.
*/
public void setMatchingAlgorithm(Matching.Algorithm algorithm) {
this.getMatching().setAlgorithm(algorithm);
for (SDField structField : getStructFields()) {
structField.getMatching().setAlgorithm(algorithm);
}
}
/** Adds an explicit index defined in this field */
public void addIndex(Index index) {
indices.put(index.getName(),index);
}
/**
* Returns an index, or null if no index with this name has had
* some <b>explicit settings</b> applied in this field (even if this returns null,
* the index may be implicitly defined by an indexing statement)
*/
@Override
public Index getIndex(String name) {
return indices.get(name);
}
/**
* Returns an index if this field has one (implicitly or
* explicitly) targeting the given name.
*/
@Override
public boolean existsIndex(String name) {
if (indices.get(name) != null) return true;
return name.equals(getName()) && doesIndexing();
}
/**
* Defined indices on this field
* @return defined indices on this
*/
@Override
public Map<String, Index> getIndices() {
return indices;
}
    /**
     * Sets the default rank type of this fields indices, and sets this rank type
     * to all indices explicitly defined here which has no rank type set.
     * (This complex behavior is due to the fact that we would prefer to have rank types
     * per field, not per index)
     */
    public void setRankType(RankType rankType) {
        this.rankType=rankType;
        for (Index index : getIndices().values()) {
            if (index.getRankType()==null)
                index.setRankType(rankType);
        }
    }
/** Returns the rank settings set in a "rank" block for this field. This is never null. */
@Override
public Ranking getRanking() { return ranking; }
/** Returns the default rank type of indices of this field, or null if nothing is set */
@Override
public RankType getRankType() { return this.rankType; }
/**
* Returns the search-time attribute settings of this field or null if none is set.
*
* <p>TODO: Make unmodifiable.</p>
*/
@Override
public Map<String, Attribute> getAttributes() { return attributes; }
public Attribute getAttribute() {
return attributes.get(getName());
}
public void addAttribute(Attribute attribute) {
String name = attribute.getName();
if (name == null || "".equals(name)) {
name = getName();
attribute.setName(name);
}
attributes.put(attribute.getName(),attribute);
}
/**
* Returns the stemming setting of this field.
* Default is determined by the owning search definition.
*
* @return the stemming setting of this, or null, to use the default
*/
@Override
public Stemming getStemming() { return stemming; }
/**
* Whether this field should be stemmed in this search definition
*/
@Override
public Stemming getStemming(Search search) {
if (stemming != null)
return stemming;
else
return search.getStemming();
}
@Override
public Field asField() {
return this;
}
/**
* Sets how this field should be stemmed, or set to null to use the default.
*/
public void setStemming(Stemming stemming) {
this.stemming = stemming;
}
/** Returns an unmodifiable map of the summary fields defined in this */
@Override
public Map<String, SummaryField> getSummaryFields() {
return Collections.unmodifiableMap(summaryFields);
}
public void removeSummaryFields() {
summaryFields.clear();
}
/** Adds a summary field */
public void addSummaryField(SummaryField summaryField) {
summaryFields.put(summaryField.getName(),summaryField);
}
/**
* Returns a summary field defined (implicitly or explicitly) by this field.
* Returns null if there is no such summary field defined.
*/
@Override
public SummaryField getSummaryField(String name) {
return summaryFields.get(name);
}
/**
* Returns a summary field defined (implicitly or explicitly) by this field.
*
* @param create true to create the summary field and add it to this field before returning if it is missing
* @return the summary field, or null if not present and create is false
*/
public SummaryField getSummaryField(String name,boolean create) {
SummaryField summaryField=summaryFields.get(name);
if (summaryField==null && create) {
summaryField=new SummaryField(name, getDataType());
addSummaryField(summaryField);
}
return summaryFields.get(name);
}
/** Returns list of static struct fields */
@Override
public Collection<SDField> getStructFields() { return structFields.values(); }
/**
* Returns a struct field defined in this field,
* potentially traversing into nested structs.
* Returns null if there is no such struct field defined.
*/
@Override
public SDField getStructField(String name) {
if (name.contains(".")) {
String superFieldName = name.substring(0,name.indexOf("."));
String subFieldName = name.substring(name.indexOf(".")+1);
SDField superField = structFields.get(superFieldName);
if (superField != null) {
return superField.getStructField(subFieldName);
}
return null;
}
return structFields.get(name);
}
/**
* Returns how the content of this field should be accent normalized etc
*/
@Override
public NormalizeLevel getNormalizing() { return normalizing; }
/**
* Change how the content of this field should be accent normalized etc
*/
public void setNormalizing(NormalizeLevel level) { normalizing = level; }
public void addQueryCommand(String name) {
queryCommands.add(name);
}
public boolean hasQueryCommand(String name) {
return queryCommands.contains(name);
}
/** Returns a list of query commands */
@Override
public List<String> getQueryCommands() { return queryCommands; }
/** Returns the document that this field was declared in, or null */
private SDDocumentType getOwnerDocType() {
return ownerDocType;
}
@Override
public boolean equals(Object other) {
if ( ! (other instanceof SDField)) return false;
return super.equals(other);
}
@Override
public int hashCode() {
return getName().hashCode();
}
@Override
public String toString() {
return "field '" + getName() + "'";
}
/** The aliases declared for this field */
@Override
public Map<String, String> getAliasToName() {
return aliasToName;
}
    /**
     * Returns whether docproc may fully index this field: always true for regular fields,
     * while extra fields require a mutable attribute of the same name.
     */
    @Override
    public boolean hasFullIndexingDocprocRights() {
        Attribute self = getAttributes().get(getName());
        return (!isExtraField() || ((self != null) && self.isMutable()));
    }
} |
This is the only functional change, sorry for the noise. | private StorageNode buildSingleNode(DeployState deployState, ContentCluster parent) {
int distributionKey = 0;
StorageNode sNode = new StorageNode(deployState.getProperties(), parent.getStorageCluster(), 1.0, distributionKey , false);
sNode.setHostResource(parent.hostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC));
sNode.initService(deployLogger);
PersistenceEngine provider = parent.getPersistence().create(deployState, sNode, storageGroup, null);
new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider);
return sNode;
} | sNode.initService(deployLogger); | private StorageNode buildSingleNode(DeployState deployState, ContentCluster parent) {
int distributionKey = 0;
StorageNode searchNode = new StorageNode(deployState.getProperties(), parent.getStorageCluster(), 1.0, distributionKey , false);
searchNode.setHostResource(parent.hostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC));
PersistenceEngine provider = parent.getPersistence().create(deployState, searchNode, storageGroup, null);
searchNode.initService(deployLogger);
Distributor distributor = new Distributor(deployState.getProperties(), parent.getDistributorNodes(), distributionKey, null, provider);
distributor.setHostResource(searchNode.getHostResource());
distributor.initService(deployLogger);
return searchNode;
} | class GroupBuilder {
private final StorageGroup storageGroup;
/* The explicitly defined subgroups of this */
private final List<GroupBuilder> subGroups;
private final List<XmlNodeBuilder> nodeBuilders;
/** The nodes explicitly specified as a nodes tag in this group, or empty if none */
private final Optional<NodesSpecification> nodeRequirement;
private final DeployLogger deployLogger;
private GroupBuilder(StorageGroup storageGroup, List<GroupBuilder> subGroups, List<XmlNodeBuilder> nodeBuilders,
Optional<NodesSpecification> nodeRequirement, DeployLogger deployLogger) {
this.storageGroup = storageGroup;
this.subGroups = subGroups;
this.nodeBuilders = nodeBuilders;
this.nodeRequirement = nodeRequirement;
this.deployLogger = deployLogger;
}
/**
* Builds a storage group for a nonhosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildNonHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildNonHosted(deployState, owner, Optional.of(this)));
}
for (XmlNodeBuilder nodeBuilder : nodeBuilders) {
storageGroup.nodes.add(nodeBuilder.build(deployState, owner, storageGroup));
}
if (parent.isEmpty() && subGroups.isEmpty() && nodeBuilders.isEmpty()) {
storageGroup.nodes.add(buildSingleNode(deployState, owner));
}
return storageGroup;
}
/**
* Builds a storage group for a hosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
if (storageGroup.getIndex() != null)
throw new IllegalArgumentException("Specifying individual groups is not supported on hosted applications");
Map<HostResource, ClusterMembership> hostMapping =
nodeRequirement.isPresent() ?
provisionHosts(nodeRequirement.get(), owner.getStorageCluster().getClusterName(), owner.getRoot().hostSystem(), deployLogger) :
Collections.emptyMap();
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroups = collectAllocatedSubgroups(hostMapping);
if (hostGroups.size() > 1) {
if (parent.isPresent())
throw new IllegalArgumentException("Cannot specify groups using the groups attribute in nested content groups");
for (Map.Entry<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroup : hostGroups.entrySet()) {
String groupIndex = String.valueOf(hostGroup.getKey().get().index());
StorageGroup subgroup = new StorageGroup(true, groupIndex, groupIndex);
for (Map.Entry<HostResource, ClusterMembership> host : hostGroup.getValue().entrySet()) {
subgroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), subgroup, host.getValue()));
}
storageGroup.subgroups.add(subgroup);
}
}
else {
for (Map.Entry<HostResource, ClusterMembership> host : hostMapping.entrySet()) {
storageGroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), storageGroup, host.getValue()));
}
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildHosted(deployState, owner, Optional.of(this)));
}
}
return storageGroup;
}
/** Collect hosts per group */
private Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> collectAllocatedSubgroups(Map<HostResource, ClusterMembership> hostMapping) {
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostsPerGroup = new LinkedHashMap<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hostMapping.entrySet()) {
Optional<ClusterSpec.Group> group = entry.getValue().cluster().group();
Map<HostResource, ClusterMembership> hostsInGroup = hostsPerGroup.get(group);
if (hostsInGroup == null) {
hostsInGroup = new LinkedHashMap<>();
hostsPerGroup.put(group, hostsInGroup);
}
hostsInGroup.put(entry.getKey(), entry.getValue());
}
return hostsPerGroup;
}
} | class GroupBuilder {
private final StorageGroup storageGroup;
/* The explicitly defined subgroups of this */
private final List<GroupBuilder> subGroups;
private final List<XmlNodeBuilder> nodeBuilders;
/** The nodes explicitly specified as a nodes tag in this group, or empty if none */
private final Optional<NodesSpecification> nodeRequirement;
private final DeployLogger deployLogger;
private GroupBuilder(StorageGroup storageGroup, List<GroupBuilder> subGroups, List<XmlNodeBuilder> nodeBuilders,
Optional<NodesSpecification> nodeRequirement, DeployLogger deployLogger) {
this.storageGroup = storageGroup;
this.subGroups = subGroups;
this.nodeBuilders = nodeBuilders;
this.nodeRequirement = nodeRequirement;
this.deployLogger = deployLogger;
}
/**
* Builds a storage group for a nonhosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildNonHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildNonHosted(deployState, owner, Optional.of(this)));
}
for (XmlNodeBuilder nodeBuilder : nodeBuilders) {
storageGroup.nodes.add(nodeBuilder.build(deployState, owner, storageGroup));
}
if (parent.isEmpty() && subGroups.isEmpty() && nodeBuilders.isEmpty()) {
storageGroup.nodes.add(buildSingleNode(deployState, owner));
}
return storageGroup;
}
/**
* Builds a storage group for a hosted environment
*
* @param owner the cluster owning this
* @param parent the parent storage group, or empty if this is the root group
* @return the storage group build by this
*/
public StorageGroup buildHosted(DeployState deployState, ContentCluster owner, Optional<GroupBuilder> parent) {
if (storageGroup.getIndex() != null)
throw new IllegalArgumentException("Specifying individual groups is not supported on hosted applications");
Map<HostResource, ClusterMembership> hostMapping =
nodeRequirement.isPresent() ?
provisionHosts(nodeRequirement.get(), owner.getStorageCluster().getClusterName(), owner.getRoot().hostSystem(), deployLogger) :
Collections.emptyMap();
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroups = collectAllocatedSubgroups(hostMapping);
if (hostGroups.size() > 1) {
if (parent.isPresent())
throw new IllegalArgumentException("Cannot specify groups using the groups attribute in nested content groups");
for (Map.Entry<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostGroup : hostGroups.entrySet()) {
String groupIndex = String.valueOf(hostGroup.getKey().get().index());
StorageGroup subgroup = new StorageGroup(true, groupIndex, groupIndex);
for (Map.Entry<HostResource, ClusterMembership> host : hostGroup.getValue().entrySet()) {
subgroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), subgroup, host.getValue()));
}
storageGroup.subgroups.add(subgroup);
}
}
else {
for (Map.Entry<HostResource, ClusterMembership> host : hostMapping.entrySet()) {
storageGroup.nodes.add(createStorageNode(deployState, owner, host.getKey(), storageGroup, host.getValue()));
}
for (GroupBuilder subGroup : subGroups) {
storageGroup.subgroups.add(subGroup.buildHosted(deployState, owner, Optional.of(this)));
}
}
return storageGroup;
}
/** Collect hosts per group */
private Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> collectAllocatedSubgroups(Map<HostResource, ClusterMembership> hostMapping) {
Map<Optional<ClusterSpec.Group>, Map<HostResource, ClusterMembership>> hostsPerGroup = new LinkedHashMap<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hostMapping.entrySet()) {
Optional<ClusterSpec.Group> group = entry.getValue().cluster().group();
Map<HostResource, ClusterMembership> hostsInGroup = hostsPerGroup.get(group);
if (hostsInGroup == null) {
hostsInGroup = new LinkedHashMap<>();
hostsPerGroup.put(group, hostsInGroup);
}
hostsInGroup.put(entry.getKey(), entry.getValue());
}
return hostsPerGroup;
}
} |
Ensure close is called when holding the monitor lock. | private void trySendError(Throwable t) {
synchronized (monitor) {
if (!responseCommitted) {
responseCommitted = true;
servletResponse.setHeader(HttpHeaders.Names.EXPIRES, null);
servletResponse.setHeader(HttpHeaders.Names.LAST_MODIFIED, null);
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_TYPE, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_LENGTH, null);
String reasonPhrase = getReasonPhrase(t, developerMode);
int statusCode = getStatusCode(t);
setStatus(servletResponse, statusCode, reasonPhrase);
if (statusCode != HttpServletResponse.SC_NO_CONTENT &&
statusCode != HttpServletResponse.SC_NOT_MODIFIED &&
statusCode != HttpServletResponse.SC_PARTIAL_CONTENT &&
statusCode >= HttpServletResponse.SC_OK) {
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, "must-revalidate,no-cache,no-store");
servletResponse.setContentType(MimeTypes.Type.TEXT_HTML_8859_1.toString());
byte[] errorContent = errorResponseContentCreator
.createErrorContent(servletRequest.getRequestURI(), statusCode, reasonPhrase);
servletResponse.setContentLength(errorContent.length);
out.writeBuffer(ByteBuffer.wrap(errorContent), NOOP_COMPLETION_HANDLER);
} else {
servletResponse.setContentLength(0);
}
} else {
RuntimeException exceptionWithStackTrace = new RuntimeException(t);
log.log(Level.FINE, "Response already committed, can't change response code", exceptionWithStackTrace);
}
}
} | out.writeBuffer(ByteBuffer.wrap(errorContent), NOOP_COMPLETION_HANDLER); | private void trySendError(Throwable t) {
if (!responseCommitted) {
responseCommitted = true;
servletResponse.setHeader(HttpHeaders.Names.EXPIRES, null);
servletResponse.setHeader(HttpHeaders.Names.LAST_MODIFIED, null);
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_TYPE, null);
servletResponse.setHeader(HttpHeaders.Names.CONTENT_LENGTH, null);
String reasonPhrase = getReasonPhrase(t, developerMode);
int statusCode = getStatusCode(t);
setStatus(servletResponse, statusCode, reasonPhrase);
if (statusCode != HttpServletResponse.SC_NO_CONTENT &&
statusCode != HttpServletResponse.SC_NOT_MODIFIED &&
statusCode != HttpServletResponse.SC_PARTIAL_CONTENT &&
statusCode >= HttpServletResponse.SC_OK) {
servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, "must-revalidate,no-cache,no-store");
servletResponse.setContentType(MimeTypes.Type.TEXT_HTML_8859_1.toString());
byte[] errorContent = errorResponseContentCreator
.createErrorContent(servletRequest.getRequestURI(), statusCode, reasonPhrase);
servletResponse.setContentLength(errorContent.length);
out.writeBuffer(ByteBuffer.wrap(errorContent), NOOP_COMPLETION_HANDLER);
} else {
servletResponse.setContentLength(0);
}
} else {
RuntimeException exceptionWithStackTrace = new RuntimeException(t);
log.log(Level.FINE, "Response already committed, can't change response code", exceptionWithStackTrace);
}
} | class ServletResponseController {
private static final Logger log = Logger.getLogger(ServletResponseController.class.getName());
/**
* The servlet spec does not require (Http)ServletResponse nor ServletOutputStream to be thread-safe. Therefore,
* we must provide our own synchronization, since we may attempt to access these objects simultaneously from
* different threads. (The typical cause of this is when one thread is writing a response while another thread
* throws an exception, causing the request to fail with an error response).
*/
private final Object monitor = new Object();
private final HttpServletRequest servletRequest;
private final HttpServletResponse servletResponse;
private final boolean developerMode;
private final ErrorResponseContentCreator errorResponseContentCreator = new ErrorResponseContentCreator();
private final ServletOutputStreamWriter out;
private boolean responseCommitted = false;
ServletResponseController(
HttpServletRequest servletRequest,
HttpServletResponse servletResponse,
Janitor janitor,
RequestMetricReporter metricReporter,
boolean developerMode) throws IOException {
this.servletRequest = servletRequest;
this.servletResponse = servletResponse;
this.developerMode = developerMode;
this.out = new ServletOutputStreamWriter(servletResponse.getOutputStream(), janitor, metricReporter);
}
void fail(Throwable t) {
try {
trySendError(t);
} catch (Throwable suppressed) {
t.addSuppressed(suppressed);
} finally {
out.close();
}
}
/**
* When this future completes there will be no more calls against the servlet output stream or servlet response.
* The framework is still allowed to invoke us though.
*
* The future might complete in the servlet framework thread, user thread or executor thread.
*/
CompletableFuture<Void> finishedFuture() { return out.finishedFuture(); }
ResponseHandler responseHandler() { return responseHandler; }
private static int getStatusCode(Throwable t) {
if (t instanceof BindingNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof BindingSetNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof RequestException) {
return ((RequestException)t).getResponseStatus();
} else if (t instanceof TimeoutException) {
return HttpServletResponse.SC_SERVICE_UNAVAILABLE;
} else {
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
}
private static String getReasonPhrase(Throwable t, boolean developerMode) {
if (developerMode) {
final StringWriter out = new StringWriter();
t.printStackTrace(new PrintWriter(out));
return out.toString();
} else if (t.getMessage() != null) {
return t.getMessage();
} else {
return t.toString();
}
}
private void setResponse(Response jdiscResponse) {
synchronized (monitor) {
servletRequest.setAttribute(HttpResponseStatisticsCollector.requestTypeAttribute, jdiscResponse.getRequestType());
if (responseCommitted) {
log.log(Level.FINE,
jdiscResponse.getError(),
() -> "Response already committed, can't change response code. " +
"From: " + servletResponse.getStatus() + ", To: " + jdiscResponse.getStatus());
out.close();
return;
}
if (jdiscResponse instanceof HttpResponse) {
setStatus(servletResponse, jdiscResponse.getStatus(), ((HttpResponse) jdiscResponse).getMessage());
} else {
String message = Optional.ofNullable(jdiscResponse.getError())
.flatMap(error -> Optional.ofNullable(error.getMessage()))
.orElse(null);
setStatus(servletResponse, jdiscResponse.getStatus(), message);
}
for (final Map.Entry<String, String> entry : jdiscResponse.headers().entries()) {
servletResponse.addHeader(entry.getKey(), entry.getValue());
}
if (servletResponse.getContentType() == null) {
servletResponse.setContentType("text/plain;charset=utf-8");
}
}
}
@SuppressWarnings("deprecation")
private static void setStatus(HttpServletResponse response, int statusCode, String reasonPhrase) {
if (reasonPhrase != null) {
response.setStatus(statusCode, reasonPhrase);
} else {
response.setStatus(statusCode);
}
}
private void ensureCommitted() {
synchronized (monitor) {
responseCommitted = true;
}
}
private final ResponseHandler responseHandler = new ResponseHandler() {
@Override
public ContentChannel handleResponse(Response response) {
setResponse(response);
return responseContentChannel;
}
};
private final ContentChannel responseContentChannel = new ContentChannel() {
@Override
public void write(ByteBuffer buf, CompletionHandler handler) {
ensureCommitted();
out.writeBuffer(buf, handlerOrNoopHandler(handler));
}
@Override
public void close(CompletionHandler handler) {
ensureCommitted();
out.close(handlerOrNoopHandler(handler));
}
private CompletionHandler handlerOrNoopHandler(CompletionHandler handler) {
return handler != null ? handler : NOOP_COMPLETION_HANDLER;
}
};
} | class ServletResponseController {
private static final Logger log = Logger.getLogger(ServletResponseController.class.getName());
/**
* The servlet spec does not require (Http)ServletResponse nor ServletOutputStream to be thread-safe. Therefore,
* we must provide our own synchronization, since we may attempt to access these objects simultaneously from
* different threads. (The typical cause of this is when one thread is writing a response while another thread
* throws an exception, causing the request to fail with an error response).
*/
private final Object monitor = new Object();
private final HttpServletRequest servletRequest;
private final HttpServletResponse servletResponse;
private final boolean developerMode;
private final ErrorResponseContentCreator errorResponseContentCreator = new ErrorResponseContentCreator();
private final ServletOutputStreamWriter out;
private boolean responseCommitted = false;
ServletResponseController(
HttpServletRequest servletRequest,
HttpServletResponse servletResponse,
Janitor janitor,
RequestMetricReporter metricReporter,
boolean developerMode) throws IOException {
this.servletRequest = servletRequest;
this.servletResponse = servletResponse;
this.developerMode = developerMode;
this.out = new ServletOutputStreamWriter(servletResponse.getOutputStream(), janitor, metricReporter);
}
void fail(Throwable t) {
synchronized (monitor) {
try {
trySendError(t);
} catch (Throwable suppressed) {
t.addSuppressed(suppressed);
} finally {
out.close();
}
}
}
/**
* When this future completes there will be no more calls against the servlet output stream or servlet response.
* The framework is still allowed to invoke us though.
*
* The future might complete in the servlet framework thread, user thread or executor thread.
*/
CompletableFuture<Void> finishedFuture() { return out.finishedFuture(); }
ResponseHandler responseHandler() { return responseHandler; }
private static int getStatusCode(Throwable t) {
if (t instanceof BindingNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof BindingSetNotFoundException) {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof RequestException) {
return ((RequestException)t).getResponseStatus();
} else if (t instanceof TimeoutException) {
return HttpServletResponse.SC_SERVICE_UNAVAILABLE;
} else {
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
}
private static String getReasonPhrase(Throwable t, boolean developerMode) {
if (developerMode) {
final StringWriter out = new StringWriter();
t.printStackTrace(new PrintWriter(out));
return out.toString();
} else if (t.getMessage() != null) {
return t.getMessage();
} else {
return t.toString();
}
}
private void setResponse(Response jdiscResponse) {
synchronized (monitor) {
servletRequest.setAttribute(HttpResponseStatisticsCollector.requestTypeAttribute, jdiscResponse.getRequestType());
if (responseCommitted) {
log.log(Level.FINE,
jdiscResponse.getError(),
() -> "Response already committed, can't change response code. " +
"From: " + servletResponse.getStatus() + ", To: " + jdiscResponse.getStatus());
out.close();
return;
}
if (jdiscResponse instanceof HttpResponse) {
setStatus(servletResponse, jdiscResponse.getStatus(), ((HttpResponse) jdiscResponse).getMessage());
} else {
String message = Optional.ofNullable(jdiscResponse.getError())
.flatMap(error -> Optional.ofNullable(error.getMessage()))
.orElse(null);
setStatus(servletResponse, jdiscResponse.getStatus(), message);
}
for (final Map.Entry<String, String> entry : jdiscResponse.headers().entries()) {
servletResponse.addHeader(entry.getKey(), entry.getValue());
}
if (servletResponse.getContentType() == null) {
servletResponse.setContentType("text/plain;charset=utf-8");
}
}
}
@SuppressWarnings("deprecation")
private static void setStatus(HttpServletResponse response, int statusCode, String reasonPhrase) {
if (reasonPhrase != null) {
response.setStatus(statusCode, reasonPhrase);
} else {
response.setStatus(statusCode);
}
}
private void ensureCommitted() {
synchronized (monitor) {
responseCommitted = true;
}
}
private final ResponseHandler responseHandler = new ResponseHandler() {
@Override
public ContentChannel handleResponse(Response response) {
setResponse(response);
return responseContentChannel;
}
};
private final ContentChannel responseContentChannel = new ContentChannel() {
@Override
public void write(ByteBuffer buf, CompletionHandler handler) {
ensureCommitted();
out.writeBuffer(buf, handlerOrNoopHandler(handler));
}
@Override
public void close(CompletionHandler handler) {
ensureCommitted();
out.close(handlerOrNoopHandler(handler));
}
private CompletionHandler handlerOrNoopHandler(CompletionHandler handler) {
return handler != null ? handler : NOOP_COMPLETION_HANDLER;
}
};
} |
Where is `notify` called? Why not `sleep`? | boolean oneMoreCheck(int timeoutMS) {
synchronized (done) {
if (!done.get()) {
try {
done.wait(timeoutMS);
} catch (InterruptedException e) {
log.log(Level.WARNING, "Ignoring interrupt signal in timeout manager.", e);
}
}
}
return ! done.get();
} | done.wait(timeoutMS); | boolean oneMoreCheck(int timeoutMS) {
synchronized (done) {
if (!done.get()) {
try {
done.wait(timeoutMS);
} catch (InterruptedException e) {
log.log(Level.WARNING, "Ignoring interrupt signal in timeout manager.", e);
}
}
}
return ! done.get();
} | class ManagerTask implements Runnable {
@Override
public void run() {
while (oneMoreCheck(ScheduledQueue.MILLIS_PER_SLOT)) {
checkTasks(timer.currentTimeMillis());
}
}
} | class ManagerTask implements Runnable {
@Override
public void run() {
while (oneMoreCheck(ScheduledQueue.MILLIS_PER_SLOT)) {
checkTasks(timer.currentTimeMillis());
}
}
} |
notify forgotten...., Because unconditional sleep should never be used. You always want the option to wake them up. This brought down runtime of unit tests from 1m10s to 12s... And adding the join revealed an eternal loop in unit tests too. | boolean oneMoreCheck(int timeoutMS) {
synchronized (done) {
if (!done.get()) {
try {
done.wait(timeoutMS);
} catch (InterruptedException e) {
log.log(Level.WARNING, "Ignoring interrupt signal in timeout manager.", e);
}
}
}
return ! done.get();
} | done.wait(timeoutMS); | boolean oneMoreCheck(int timeoutMS) {
synchronized (done) {
if (!done.get()) {
try {
done.wait(timeoutMS);
} catch (InterruptedException e) {
log.log(Level.WARNING, "Ignoring interrupt signal in timeout manager.", e);
}
}
}
return ! done.get();
} | class ManagerTask implements Runnable {
@Override
public void run() {
while (oneMoreCheck(ScheduledQueue.MILLIS_PER_SLOT)) {
checkTasks(timer.currentTimeMillis());
}
}
} | class ManagerTask implements Runnable {
@Override
public void run() {
while (oneMoreCheck(ScheduledQueue.MILLIS_PER_SLOT)) {
checkTasks(timer.currentTimeMillis());
}
}
} |
should probably normalize pathOnHost before comparing with containerRootOnHost? | private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
} | if (!pathOnHost.startsWith(containerRootOnHost)) | private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStore = containerRootOnHost.relativize(pathOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStore.getNameCount(); i++) {
String part = pathUnderContainerStore.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
parts.add(part);
}
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
}
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStorage = containerRootOnHost.relativize(pathOnHost);
if (pathUnderContainerStorage.getNameCount() == 0 || pathUnderContainerStorage.getName(0).toString().isEmpty())
return new ContainerPath(containerFs, pathOnHost, new String[0]);
if (pathUnderContainerStorage.getName(0).toString().equals(".."))
throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStorage.getNameCount(); i++)
parts.add(pathUnderContainerStorage.getName(i).toString());
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
}
} |
This would be wrong if parts[parts.size() - 1] is a symlink. But then I think normalization is also not symlink friendly, so using ContainerPath with symlinks may be disastrous. Exposing symlinks that are supposed to work both inside and outside of the container is also only going to work if they are relative, so symlinks is something we ought to stay away from anyways. So is that official - that container fs does not support symlinks? | private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
} | if (!parts.isEmpty()) parts.remove(parts.size() - 1); | private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
}
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStore = containerRootOnHost.relativize(pathOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStore.getNameCount(); i++) {
String part = pathUnderContainerStore.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
parts.add(part);
}
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
}
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
}
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStorage = containerRootOnHost.relativize(pathOnHost);
if (pathUnderContainerStorage.getNameCount() == 0 || pathUnderContainerStorage.getName(0).toString().isEmpty())
return new ContainerPath(containerFs, pathOnHost, new String[0]);
if (pathUnderContainerStorage.getName(0).toString().equals(".."))
throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStorage.getNameCount(); i++)
parts.add(pathUnderContainerStorage.getName(i).toString());
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
}
} |
But this would throw even for paths like /data/vespa/storage/foo/opt/.. ? | static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStore = containerRootOnHost.relativize(pathOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStore.getNameCount(); i++) {
String part = pathUnderContainerStore.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
parts.add(part);
}
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
} | if (part.equals("..")) throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost); | static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStorage = containerRootOnHost.relativize(pathOnHost);
if (pathUnderContainerStorage.getNameCount() == 0 || pathUnderContainerStorage.getName(0).toString().isEmpty())
return new ContainerPath(containerFs, pathOnHost, new String[0]);
if (pathUnderContainerStorage.getName(0).toString().equals(".."))
throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStorage.getNameCount(); i++)
parts.add(pathUnderContainerStorage.getName(i).toString());
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
}
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
}
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
} |
Have you considered doing the uid -> name mapping here? | private NamedPrincipal(int id, UserPrincipal baseFsPrincipal) {
this.id = id;
this.name = Integer.toString(id);
this.baseFsPrincipal = Objects.requireNonNull(baseFsPrincipal);
} | this.name = Integer.toString(id); | private NamedPrincipal(int id, UserPrincipal baseFsPrincipal) {
this.id = id;
this.name = Optional.ofNullable(CONTAINER_IDS_BY_NAME.inverse().get(id)).orElseGet(() -> Integer.toString(id));
this.baseFsPrincipal = Objects.requireNonNull(baseFsPrincipal);
} | class NamedPrincipal implements UserPrincipal {
private final int id;
private final String name;
private final UserPrincipal baseFsPrincipal;
@Override
public final String getName() {
return name;
}
public int id() {
return id;
}
public UserPrincipal baseFsPrincipal() {
return baseFsPrincipal;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NamedPrincipal that = (NamedPrincipal) o;
return id == that.id && baseFsPrincipal.equals(that.baseFsPrincipal);
}
@Override
public int hashCode() {
return Objects.hash(id, baseFsPrincipal);
}
} | class NamedPrincipal implements UserPrincipal {
private final int id;
private final String name;
private final UserPrincipal baseFsPrincipal;
@Override
public final String getName() {
return name;
}
public int id() {
return id;
}
public UserPrincipal baseFsPrincipal() {
return baseFsPrincipal;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NamedPrincipal that = (NamedPrincipal) o;
return id == that.id && baseFsPrincipal.equals(that.baseFsPrincipal);
}
@Override
public int hashCode() {
return Objects.hash(id, baseFsPrincipal);
}
@Override
public String toString() {
return "{id=" + id + ", baseFsPrincipal=" + baseFsPrincipal + '}';
}
} |
For `root` and `vespa`? | private NamedPrincipal(int id, UserPrincipal baseFsPrincipal) {
this.id = id;
this.name = Integer.toString(id);
this.baseFsPrincipal = Objects.requireNonNull(baseFsPrincipal);
} | this.name = Integer.toString(id); | private NamedPrincipal(int id, UserPrincipal baseFsPrincipal) {
this.id = id;
this.name = Optional.ofNullable(CONTAINER_IDS_BY_NAME.inverse().get(id)).orElseGet(() -> Integer.toString(id));
this.baseFsPrincipal = Objects.requireNonNull(baseFsPrincipal);
} | class NamedPrincipal implements UserPrincipal {
private final int id;
private final String name;
private final UserPrincipal baseFsPrincipal;
@Override
public final String getName() {
return name;
}
public int id() {
return id;
}
public UserPrincipal baseFsPrincipal() {
return baseFsPrincipal;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NamedPrincipal that = (NamedPrincipal) o;
return id == that.id && baseFsPrincipal.equals(that.baseFsPrincipal);
}
@Override
public int hashCode() {
return Objects.hash(id, baseFsPrincipal);
}
} | class NamedPrincipal implements UserPrincipal {
private final int id;
private final String name;
private final UserPrincipal baseFsPrincipal;
@Override
public final String getName() {
return name;
}
public int id() {
return id;
}
public UserPrincipal baseFsPrincipal() {
return baseFsPrincipal;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NamedPrincipal that = (NamedPrincipal) o;
return id == that.id && baseFsPrincipal.equals(that.baseFsPrincipal);
}
@Override
public int hashCode() {
return Objects.hash(id, baseFsPrincipal);
}
@Override
public String toString() {
return "{id=" + id + ", baseFsPrincipal=" + baseFsPrincipal + '}';
}
} |
yes | private NamedPrincipal(int id, UserPrincipal baseFsPrincipal) {
this.id = id;
this.name = Integer.toString(id);
this.baseFsPrincipal = Objects.requireNonNull(baseFsPrincipal);
} | this.name = Integer.toString(id); | private NamedPrincipal(int id, UserPrincipal baseFsPrincipal) {
this.id = id;
this.name = Optional.ofNullable(CONTAINER_IDS_BY_NAME.inverse().get(id)).orElseGet(() -> Integer.toString(id));
this.baseFsPrincipal = Objects.requireNonNull(baseFsPrincipal);
} | class NamedPrincipal implements UserPrincipal {
private final int id;
private final String name;
private final UserPrincipal baseFsPrincipal;
@Override
public final String getName() {
return name;
}
public int id() {
return id;
}
public UserPrincipal baseFsPrincipal() {
return baseFsPrincipal;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NamedPrincipal that = (NamedPrincipal) o;
return id == that.id && baseFsPrincipal.equals(that.baseFsPrincipal);
}
@Override
public int hashCode() {
return Objects.hash(id, baseFsPrincipal);
}
} | class NamedPrincipal implements UserPrincipal {
private final int id;
private final String name;
private final UserPrincipal baseFsPrincipal;
@Override
public final String getName() {
return name;
}
public int id() {
return id;
}
public UserPrincipal baseFsPrincipal() {
return baseFsPrincipal;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NamedPrincipal that = (NamedPrincipal) o;
return id == that.id && baseFsPrincipal.equals(that.baseFsPrincipal);
}
@Override
public int hashCode() {
return Objects.hash(id, baseFsPrincipal);
}
@Override
public String toString() {
return "{id=" + id + ", baseFsPrincipal=" + baseFsPrincipal + '}';
}
} |
Done | private NamedPrincipal(int id, UserPrincipal baseFsPrincipal) {
this.id = id;
this.name = Integer.toString(id);
this.baseFsPrincipal = Objects.requireNonNull(baseFsPrincipal);
} | this.name = Integer.toString(id); | private NamedPrincipal(int id, UserPrincipal baseFsPrincipal) {
this.id = id;
this.name = Optional.ofNullable(CONTAINER_IDS_BY_NAME.inverse().get(id)).orElseGet(() -> Integer.toString(id));
this.baseFsPrincipal = Objects.requireNonNull(baseFsPrincipal);
} | class NamedPrincipal implements UserPrincipal {
private final int id;
private final String name;
private final UserPrincipal baseFsPrincipal;
@Override
public final String getName() {
return name;
}
public int id() {
return id;
}
public UserPrincipal baseFsPrincipal() {
return baseFsPrincipal;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NamedPrincipal that = (NamedPrincipal) o;
return id == that.id && baseFsPrincipal.equals(that.baseFsPrincipal);
}
@Override
public int hashCode() {
return Objects.hash(id, baseFsPrincipal);
}
} | class NamedPrincipal implements UserPrincipal {
private final int id;
private final String name;
private final UserPrincipal baseFsPrincipal;
@Override
public final String getName() {
return name;
}
public int id() {
return id;
}
public UserPrincipal baseFsPrincipal() {
return baseFsPrincipal;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NamedPrincipal that = (NamedPrincipal) o;
return id == that.id && baseFsPrincipal.equals(that.baseFsPrincipal);
}
@Override
public int hashCode() {
return Objects.hash(id, baseFsPrincipal);
}
@Override
public String toString() {
return "{id=" + id + ", baseFsPrincipal=" + baseFsPrincipal + '}';
}
} |
The path is normalized first, so ".." can only appear at the start, and it does so only if the given path is outside the container root on the host. | static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStore = containerRootOnHost.relativize(pathOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStore.getNameCount(); i++) {
String part = pathUnderContainerStore.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
parts.add(part);
}
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
} | if (part.equals("..")) throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost); | static ContainerPath fromPathOnHost(ContainerFileSystem containerFs, Path pathOnHost) {
pathOnHost = pathOnHost.normalize();
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
Path pathUnderContainerStorage = containerRootOnHost.relativize(pathOnHost);
if (pathUnderContainerStorage.getNameCount() == 0 || pathUnderContainerStorage.getName(0).toString().isEmpty())
return new ContainerPath(containerFs, pathOnHost, new String[0]);
if (pathUnderContainerStorage.getName(0).toString().equals(".."))
throw new IllegalArgumentException("Path " + pathOnHost + " is not under container root " + containerRootOnHost);
List<String> parts = new ArrayList<>();
for (int i = 0; i < pathUnderContainerStorage.getNameCount(); i++)
parts.add(pathUnderContainerStorage.getName(i).toString());
return new ContainerPath(containerFs, pathOnHost, parts.toArray(String[]::new));
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
}
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
} | class ContainerPath implements Path {
private final ContainerFileSystem containerFs;
private final Path pathOnHost;
private final String[] parts;
private ContainerPath(ContainerFileSystem containerFs, Path pathOnHost, String[] parts) {
this.containerFs = Objects.requireNonNull(containerFs);
this.pathOnHost = Objects.requireNonNull(pathOnHost);
this.parts = Objects.requireNonNull(parts);
if (!pathOnHost.isAbsolute())
throw new IllegalArgumentException("Path host must be absolute: " + pathOnHost);
Path containerRootOnHost = containerFs.provider().containerRootOnHost();
if (!pathOnHost.startsWith(containerRootOnHost))
throw new IllegalArgumentException("Path on host (" + pathOnHost + ") must start with container root on host (" + containerRootOnHost + ")");
}
public Path pathOnHost() {
return pathOnHost;
}
@Override
public FileSystem getFileSystem() {
return containerFs;
}
@Override
public ContainerPath getRoot() {
return resolve(containerFs, new String[0], Path.of("/"));
}
@Override
public Path getFileName() {
if (parts.length == 0) return null;
return Path.of(parts[parts.length - 1]);
}
@Override
public ContainerPath getParent() {
if (parts.length == 0) return null;
return new ContainerPath(containerFs, pathOnHost.getParent(), Arrays.copyOf(parts, parts.length-1));
}
@Override
public int getNameCount() {
return parts.length;
}
@Override
public Path getName(int index) {
return Path.of(parts[index]);
}
@Override
public Path subpath(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex >= endIndex || endIndex > parts.length)
throw new IllegalArgumentException();
if (endIndex - beginIndex == 1) return getName(beginIndex);
String[] rest = new String[endIndex - beginIndex - 1];
System.arraycopy(parts, beginIndex + 1, rest, 0, rest.length);
return Path.of(parts[beginIndex], rest);
}
@Override
public ContainerPath resolve(Path other) {
return resolve(containerFs, parts, other);
}
@Override
public ContainerPath resolveSibling(String other) {
return resolve(Path.of("..", other));
}
@Override
public boolean startsWith(Path other) {
if (other.getFileSystem() != containerFs) return false;
String[] otherParts = toContainerPath(other).parts;
if (parts.length < otherParts.length) return false;
for (int i = 0; i < otherParts.length; i++) {
if ( ! parts[i].equals(otherParts[i])) return false;
}
return true;
}
@Override
public boolean endsWith(Path other) {
int offset = parts.length - other.getNameCount();
if (offset < 0 || (other.isAbsolute() && offset > 0)) return false;
for (int i = 0; i < other.getNameCount(); i++) {
if ( ! parts[offset + i].equals(other.getName(i).toString())) return false;
}
return true;
}
@Override
public boolean isAbsolute() {
return true;
}
@Override
public ContainerPath normalize() {
return this;
}
@Override
public ContainerPath toAbsolutePath() {
return this;
}
@Override
public ContainerPath toRealPath(LinkOption... options) throws IOException {
Path realPathOnHost = pathOnHost.toRealPath(options);
if (realPathOnHost.equals(pathOnHost)) return this;
return fromPathOnHost(containerFs, realPathOnHost);
}
@Override
public Path relativize(Path other) {
return pathOnHost.relativize(toContainerPath(other).pathOnHost);
}
@Override
public URI toUri() {
throw new UnsupportedOperationException();
}
@Override
public WatchKey register(WatchService watcher, WatchEvent.Kind<?>[] events, WatchEvent.Modifier... modifiers) throws IOException {
return pathOnHost.register(watcher, events, modifiers);
}
@Override
public int compareTo(Path other) {
return pathOnHost.compareTo(toContainerPath(other));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ContainerPath paths = (ContainerPath) o;
return containerFs.equals(paths.containerFs) && pathOnHost.equals(paths.pathOnHost) && Arrays.equals(parts, paths.parts);
}
@Override
public int hashCode() {
int result = Objects.hash(containerFs, pathOnHost);
result = 31 * result + Arrays.hashCode(parts);
return result;
}
@Override
public String toString() {
return '/' + String.join("/", parts);
}
private static ContainerPath resolve(ContainerFileSystem containerFs, String[] currentParts, Path other) {
List<String> parts = other.isAbsolute() ? new ArrayList<>() : new ArrayList<>(Arrays.asList(currentParts));
for (int i = 0; i < other.getNameCount(); i++) {
String part = other.getName(i).toString();
if (part.isEmpty() || part.equals(".")) continue;
if (part.equals("..")) {
if (!parts.isEmpty()) parts.remove(parts.size() - 1);
continue;
}
parts.add(part);
}
return new ContainerPath(containerFs,
containerFs.provider().containerRootOnHost().resolve(String.join("/", parts)),
parts.toArray(String[]::new));
}
static ContainerPath fromPathInContainer(ContainerFileSystem containerFs, Path pathInContainer) {
if (!pathInContainer.isAbsolute())
throw new IllegalArgumentException("Path in container must be absolute: " + pathInContainer);
return resolve(containerFs, new String[0], pathInContainer);
}
} |
Leftover debug statement | public void setInherited(String inherited) {
System.out.println("Adding inheritance of " + inherited + " in " + this);
this.inherited = inherited;
} | System.out.println("Adding inheritance of " + inherited + " in " + this); | public void setInherited(String inherited) {
this.inherited = inherited;
} | class DocumentSummary extends FieldView {
private boolean fromDisk = false;
private boolean omitSummaryFeatures = false;
private String inherited;
private final Search owner;
/** Creates a DocumentSummary with the given name. */
public DocumentSummary(String name, Search owner) {
super(name);
this.owner = owner;
}
public void setFromDisk(boolean fromDisk) { this.fromDisk = fromDisk; }
/** Returns whether the user has noted explicitly that this summary accesses disk */
public boolean isFromDisk() { return fromDisk; }
public void setOmitSummaryFeatures(boolean value) {
omitSummaryFeatures = value;
}
public boolean omitSummaryFeatures() {
return omitSummaryFeatures;
}
/**
* The model is constrained to ensure that summary fields of the same name
* in different classes have the same summary transform, because this is
* what is supported by the backend currently.
*
* @param summaryField the summaryfield to add
*/
public void add(SummaryField summaryField) {
summaryField.addDestination(getName());
super.add(summaryField);
}
public SummaryField getSummaryField(String name) {
var parent = getInherited();
if (parent != null) {
return parent.getSummaryField(name);
}
return (SummaryField) get(name);
}
public Collection<SummaryField> getSummaryFields() {
var fields = new ArrayList<SummaryField>(getFields().size());
var parent = getInherited();
if (parent != null) {
fields.addAll(parent.getSummaryFields());
}
for (var field : getFields()) {
fields.add((SummaryField) field);
}
return fields;
}
/**
* Removes implicit fields which shouldn't be included.
* This is implicitly added fields which are sources for
* other fields. We then assume they are not intended to be added
* implicitly in addition.
* This should be called when this summary is complete.
*/
public void purgeImplicits() {
List<SummaryField> falseImplicits = new ArrayList<>();
for (SummaryField summaryField : getSummaryFields() ) {
if (summaryField.isImplicit()) continue;
for (Iterator<SummaryField.Source> j = summaryField.sourceIterator(); j.hasNext(); ) {
String sourceName = j.next().getName();
if (sourceName.equals(summaryField.getName())) continue;
SummaryField sourceField=getSummaryField(sourceName);
if (sourceField==null) continue;
if (!sourceField.isImplicit()) continue;
falseImplicits.add(sourceField);
}
}
for (SummaryField field : falseImplicits) {
remove(field.getName());
}
}
/** Sets the parent of this. Both summaries must be present in the same search definition */
/** Returns the parent of this, or null if none is inherited */
public DocumentSummary getInherited() {
return owner.getSummary(inherited);
}
/** Returns the name of the summary this was declared to inherit, or null if not sett to inherit anything */
public String getInheritedName() {
return inherited;
}
@Override
public String toString() {
return "document summary '" + getName() + "'";
}
public void validate() {
if (inherited != null) {
if ( ! owner.getSummaries().containsKey(inherited))
throw new IllegalArgumentException(this + " inherits " + inherited + " but this" +
" is not present in " + owner);
}
}
} | class DocumentSummary extends FieldView {
private boolean fromDisk = false;
private boolean omitSummaryFeatures = false;
private String inherited;
private final Search owner;
/** Creates a DocumentSummary with the given name. */
public DocumentSummary(String name, Search owner) {
super(name);
this.owner = owner;
}
public void setFromDisk(boolean fromDisk) { this.fromDisk = fromDisk; }
/** Returns whether the user has noted explicitly that this summary accesses disk */
public boolean isFromDisk() { return fromDisk; }
public void setOmitSummaryFeatures(boolean value) {
omitSummaryFeatures = value;
}
public boolean omitSummaryFeatures() {
return omitSummaryFeatures;
}
/**
* The model is constrained to ensure that summary fields of the same name
* in different classes have the same summary transform, because this is
* what is supported by the backend currently.
*
* @param summaryField the summaryfield to add
*/
public void add(SummaryField summaryField) {
summaryField.addDestination(getName());
super.add(summaryField);
}
public SummaryField getSummaryField(String name) {
var parent = getInherited();
if (parent != null) {
return parent.getSummaryField(name);
}
return (SummaryField) get(name);
}
public Collection<SummaryField> getSummaryFields() {
var fields = new ArrayList<SummaryField>(getFields().size());
var parent = getInherited();
if (parent != null) {
fields.addAll(parent.getSummaryFields());
}
for (var field : getFields()) {
fields.add((SummaryField) field);
}
return fields;
}
/**
* Removes implicit fields which shouldn't be included.
* This is implicitly added fields which are sources for
* other fields. We then assume they are not intended to be added
* implicitly in addition.
* This should be called when this summary is complete.
*/
public void purgeImplicits() {
List<SummaryField> falseImplicits = new ArrayList<>();
for (SummaryField summaryField : getSummaryFields() ) {
if (summaryField.isImplicit()) continue;
for (Iterator<SummaryField.Source> j = summaryField.sourceIterator(); j.hasNext(); ) {
String sourceName = j.next().getName();
if (sourceName.equals(summaryField.getName())) continue;
SummaryField sourceField=getSummaryField(sourceName);
if (sourceField==null) continue;
if (!sourceField.isImplicit()) continue;
falseImplicits.add(sourceField);
}
}
for (SummaryField field : falseImplicits) {
remove(field.getName());
}
}
/** Sets the parent of this. Both summaries must be present in the same search definition */
/** Returns the parent of this, or null if none is inherited */
public DocumentSummary getInherited() {
return owner.getSummary(inherited);
}
/** Returns the name of the summary this was declared to inherit, or null if not sett to inherit anything */
public String getInheritedName() {
return inherited;
}
@Override
public String toString() {
return "document summary '" + getName() + "'";
}
public void validate(DeployLogger logger) {
if (inherited != null) {
if ( ! owner.getSummaries().containsKey(inherited)) {
logger.log(Level.WARNING,
this + " inherits " + inherited + " but this" + " is not present in " + owner);
logger.logApplicationPackage(Level.WARNING,
this + " inherits " + inherited + " but this" + " is not present in " + owner);
}
}
}
} |
Do we want this to be `info` level? | public void run() {
controllerThreadId = Thread.currentThread().getId();
context.log(logger, Level.INFO, "Starting ticks");
try {
processingCycle = true;
while (isRunning()) {
tick();
}
} catch (InterruptedException e) {
context.log(logger, Level.FINE, () -> "Event thread stopped by interrupt exception: " + e);
} catch (Throwable t) {
t.printStackTrace();
context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t);
synchronized (monitor) { running.set(false); }
System.exit(1);
} finally {
prepareShutdownEdge();
}
} | context.log(logger, Level.INFO, "Starting ticks"); | public void run() {
controllerThreadId = Thread.currentThread().getId();
context.log(logger, Level.INFO, "Starting tick loop");
try {
processingCycle = true;
while (isRunning()) {
tick();
}
context.log(logger, Level.INFO, "Tick loop stopped");
} catch (InterruptedException e) {
context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e);
} catch (Throwable t) {
t.printStackTrace();
context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t);
synchronized (monitor) { running.set(false); }
System.exit(1);
} finally {
prepareShutdownEdge();
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static final Logger logger = Logger.getLogger(FleetController.class.getName());
private final Context context;
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
private Thread runner = null;
private final AtomicBoolean running = new AtomicBoolean(true);
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long lastMetricUpdateCycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
private final List<ClusterStateBundle> newStates = new ArrayList<>();
private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
private boolean inMasterMoratorium = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();
private Set<String> configuredBucketSpaces = Collections.emptySet();
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return 0; }
@Override
public ContentCluster getCluster() { return cluster; }
};
public FleetController(Context context,
Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) {
context.log(logger, Level.INFO, "Created");
this.context = context;
this.timer = timer;
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
propagateOptions();
}
public static FleetController create(FleetControllerOptions options,
StatusPageServerInterface statusPageServer,
MetricReporter metricReporter) throws Exception {
var context = new ContextImpl(options);
var timer = new RealTimer();
var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName);
var log = new EventLog(timer, metricUpdater);
var cluster = new ContentCluster(
options.clusterName,
options.nodes,
options.storageDistribution);
var stateGatherer = new NodeStateGatherer(timer, timer, log);
var communicator = new RPCCommunicator(
RPCCommunicator.createRealSupervisor(),
timer,
options.fleetControllerIndex,
options.nodeStateRequestTimeoutMS,
options.nodeStateRequestTimeoutEarliestPercentage,
options.nodeStateRequestTimeoutLatestPercentage,
options.nodeStateRequestRoundTripTimeMaxSeconds);
var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, timer);
var lookUp = new SlobrokClient(timer);
var stateGenerator = new StateChangeHandler(timer, log);
var stateBroadcaster = new SystemStateBroadcaster(timer, timer);
var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator,
statusPageServer, null, lookUp, database, stateGenerator,
stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
}
public void start() {
runner = new Thread(this);
runner.start();
}
public Object getMonitor() { return monitor; }
public boolean isRunning() {
return running.get();
}
public boolean isMaster() {
synchronized (monitor) {
return isMaster;
}
}
public ClusterState getClusterState() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterState();
}
}
public ClusterStateBundle getClusterStateBundle() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterStateBundle();
}
}
public void schedule(RemoteClusterControllerTask task) {
synchronized (monitor) {
context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution");
remoteTasks.add(task);
}
}
/** Used for unit testing. */
public void addSystemStateListener(SystemStateListener listener) {
systemStateListeners.add(listener);
com.yahoo.vdslib.state.ClusterState state = getSystemState();
if (state == null) {
throw new NullPointerException("Cluster state should never be null at this point");
}
listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
if (convergedState != null) {
listener.handleStateConvergedInCluster(convergedState);
}
}
public FleetControllerOptions getOptions() {
synchronized(monitor) {
return options.clone();
}
}
public NodeState getReportedNodeState(Node n) {
synchronized(monitor) {
NodeInfo node = cluster.getNodeInfo(n);
if (node == null) {
throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
}
return node.getReportedState();
}
}
public NodeState getWantedNodeState(Node n) {
synchronized(monitor) {
return cluster.getNodeInfo(n).getWantedState();
}
}
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
return stateVersionTracker.getVersionedClusterState();
}
}
public int getRpcPort() { return rpcServer.getPort(); }
public void shutdown() throws InterruptedException, java.io.IOException {
if (runner != null && isRunning()) {
context.log(logger, Level.INFO, "Joining event thread.");
running.set(false);
synchronized(monitor) { monitor.notifyAll(); }
runner.join();
}
context.log(logger, Level.INFO, "FleetController done shutting down event thread.");
controllerThreadId = Thread.currentThread().getId();
database.shutdown(databaseContext);
if (statusPageServer != null) {
statusPageServer.shutdown();
}
if (rpcServer != null) {
rpcServer.shutdown();
}
communicator.shutdown();
nodeLookup.shutdown();
}
public void updateOptions(FleetControllerOptions options) {
var newId = FleetControllerId.fromOptions(options);
synchronized(monitor) {
assert newId.equals(context.id());
context.log(logger, Level.INFO, "FleetController has new options");
nextOptions = options.clone();
monitor.notifyAll();
}
}
private void verifyInControllerThread() {
if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
}
}
private ClusterState latestCandidateClusterState() {
return stateVersionTracker.getLatestCandidateState().getClusterState();
}
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
wantedStateChanged = true;
stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
verifyInControllerThread();
triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
if (!options.clusterFeedBlockEnabled) {
return;
}
var calc = createResourceExhaustionCalculator();
var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
if (!previouslyExhausted.equals(nowExhausted)) {
context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
previouslyExhausted, nowExhausted));
stateChangeHandler.setStateChangedFlag();
}
}
@Override
public void handleNewNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewNode(node);
}
@Override
public void handleMissingNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
@Override
public void handleNewRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewRpcAddress(node);
}
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleReturnedRpcAddress(node);
}
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
verifyInControllerThread();
ClusterState baselineState = stateBundle.getBaselineClusterState();
newStates.add(stateBundle);
metricUpdater.updateClusterStateMetrics(cluster, baselineState,
ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
lastMetricUpdateCycleCount = cycleCount;
systemStateBroadcaster.handleNewClusterStates(stateBundle);
if (isMaster) {
storeClusterStateMetaDataToZooKeeper(stateBundle);
}
}
private boolean maybePublishOldMetrics() {
verifyInControllerThread();
if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) {
ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
ClusterState baselineState = stateBundle.getBaselineClusterState();
metricUpdater.updateClusterStateMetrics(cluster, baselineState,
ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
lastMetricUpdateCycleCount = cycleCount;
return true;
} else {
return false;
}
}
private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
try {
database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
database.saveLatestClusterStateBundle(databaseContext, stateBundle);
} catch (InterruptedException e) {
throw new RuntimeException("ZooKeeper write interrupted", e);
}
}
/**
* This function gives data of the current state in master election.
* The keys in the given map are indices of fleet controllers.
* The values are what fleetcontroller that fleetcontroller wants to
* become master.
*
* If more than half the fleetcontrollers want a node to be master and
* that node also wants itself as master, that node is the single master.
* If this condition is not met, there is currently no master.
*/
public void handleFleetData(Map<Integer, Integer> data) {
verifyInControllerThread();
context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler");
metricUpdater.updateMasterElectionMetrics(data);
masterElectionHandler.handleFleetData(data);
}
/**
* Called when we can no longer contact database.
*/
public void lostDatabaseConnection() {
verifyInControllerThread();
boolean wasMaster = isMaster;
masterElectionHandler.lostDatabaseConnection();
if (wasMaster) {
dropLeadershipState();
metricUpdater.updateMasterState(false);
}
}
private void failAllVersionDependentTasks() {
tasksPendingStateRecompute.forEach(task -> {
task.handleFailure(RemoteClusterControllerTask.Failure.of(
RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
task.notifyCompleted();
});
tasksPendingStateRecompute.clear();
taskCompletionQueue.forEach(task -> {
task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
task.getTask().notifyCompleted();
});
taskCompletionQueue.clear();
}
/** Called when all distributors have acked newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException {
Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext);
convergedStates.add(currentBundle);
}
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
for (ConfiguredNode node : newNodes) {
if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) {
return true;
}
}
return false;
}
/** This is called when the options field has been set to a new set of options */
private void propagateOptions() {
verifyInControllerThread();
selfTerminateIfConfiguredNodeIndexHasChanged();
if (changesConfiguredNodeSet(options.nodes)) {
cluster.setSlobrokGenerationCount(0);
}
configuredBucketSpaces = Collections.unmodifiableSet(
Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
.collect(Collectors.toSet()));
stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
communicator.propagateOptions(options);
if (nodeLookup instanceof SlobrokClient) {
((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
}
eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
cluster.setPollingFrequency(options.statePollingFrequency);
cluster.setDistribution(options.storageDistribution);
cluster.setNodes(options.nodes);
database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext);
database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext);
stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
stateChangeHandler.reconfigureFromOptions(options);
stateChangeHandler.setStateChangedFlag();
masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty());
if (rpcServer != null) {
rpcServer.setMasterElectionHandler(masterElectionHandler);
try{
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
} catch (ListenFailedException e) {
context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
} catch (Exception e) {
context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
}
}
if (statusPageServer != null) {
try{
statusPageServer.setPort(options.httpPort);
} catch (Exception e) {
context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
}
}
long currentTime = timer.getCurrentTimeInMillis();
nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
}
private void selfTerminateIfConfiguredNodeIndexHasChanged() {
var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex);
if (!newId.equals(context.id())) {
context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". We do not support doing this live; " +
"immediately exiting now to force new configuration");
prepareShutdownEdge();
System.exit(1);
}
}
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
verifyInControllerThread();
StatusPageResponse.ResponseCode responseCode;
String message;
final String hiddenMessage;
try {
StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
if (handler == null) {
throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
}
return handler.handle(httpRequest);
} catch (FileNotFoundException e) {
responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
message = e.getMessage();
hiddenMessage = "";
} catch (Exception e) {
responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
message = "Internal Server Error";
hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
}
TimeZone tz = TimeZone.getTimeZone("UTC");
long currentTime = timer.getCurrentTimeInMillis();
StatusPageResponse response = new StatusPageResponse();
StringBuilder content = new StringBuilder();
response.setContentType("text/html");
response.setResponseCode(responseCode);
content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
response.writeHtmlHeader(content, message);
response.writeHtmlFooter(content, hiddenMessage);
response.writeContent(content.toString());
return response;
}
public void tick() throws Exception {
synchronized (monitor) {
boolean didWork;
didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext));
didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState);
didWork |= metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions);
stateChangeHandler.setMaster(isMaster);
if ( ! isRunning()) { return; }
didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this));
if ( ! isRunning()) { return; }
if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
didWork |= resyncLocallyCachedState();
} else {
stepDownAsStateGatherer();
}
if ( ! isRunning()) { return; }
didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses);
if ( ! isRunning()) { return; }
if (isMaster) {
didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes);
systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
}
if ( ! isRunning()) { return; }
didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest);
if ( ! isRunning()) { return; }
if (rpcServer != null) {
didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this));
}
if ( ! isRunning()) { return; }
didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask);
didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks);
didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics);
processingCycle = false;
++cycleCount;
long tickStopTime = timer.getCurrentTimeInMillis();
if (tickStopTime >= tickStartTime) {
metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
}
monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime);
if ( ! isRunning()) { return; }
tickStartTime = timer.getCurrentTimeInMillis();
processingCycle = true;
if (nextOptions != null) {
switchToNewConfig();
}
}
if (isRunning()) {
propagateNewStatesToListeners();
}
}
private boolean updateMasterElectionState() {
try {
return masterElectionHandler.watchMasterElection(database, databaseContext);
} catch (InterruptedException e) {
throw new RuntimeException(e);
} catch (Exception e) {
context.log(logger, Level.WARNING, "Failed to watch master election: " + e);
}
return false;
}
private void stepDownAsStateGatherer() {
if (isStateGatherer) {
cluster.clearStates();
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
}
isStateGatherer = false;
}
private void switchToNewConfig() {
options = nextOptions;
nextOptions = null;
try {
propagateOptions();
} catch (Exception e) {
context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e);
}
}
private boolean processAnyPendingStatusPageRequest() {
if (statusPageServer != null) {
StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
if (statusRequest != null) {
statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
return true;
}
}
return false;
}
private boolean broadcastClusterStateToEligibleNodes() {
if (database.hasPendingClusterStateMetaDataStore()) {
context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
return false;
}
boolean sentAny = false;
long currentTime = timer.getCurrentTimeInMillis();
if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
&& currentTime >= nextStateSendTime)
{
if (inMasterMoratorium) {
context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ?
"Master moratorium complete: all nodes have reported in" :
"Master moratorium complete: timed out waiting for all nodes to report in");
firstAllowedStateBroadcast = currentTime;
inMasterMoratorium = false;
}
sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
if (sentAny) {
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
}
}
sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
return sentAny;
}
private void propagateNewStatesToListeners() {
if ( ! newStates.isEmpty()) {
synchronized (systemStateListeners) {
for (ClusterStateBundle stateBundle : newStates) {
for (SystemStateListener listener : systemStateListeners) {
listener.handleNewPublishedState(stateBundle);
}
}
newStates.clear();
}
}
if ( ! convergedStates.isEmpty()) {
synchronized (systemStateListeners) {
for (ClusterStateBundle stateBundle : convergedStates) {
for (SystemStateListener listener : systemStateListeners) {
listener.handleStateConvergedInCluster(stateBundle);
}
}
convergedStates.clear();
}
}
}
private boolean processNextQueuedRemoteTask() {
metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
RemoteClusterControllerTask task = remoteTasks.poll();
if (task == null) {
return false;
}
final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
task.doRemoteFleetControllerTask(taskContext);
if (taskMayBeCompletedImmediately(task)) {
context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
task.notifyCompleted();
} else {
context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
tasksPendingStateRecompute.add(task);
}
return true;
}
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
}
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
context.cluster = cluster;
context.currentConsolidatedState = consolidatedClusterState();
context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
context.masterInfo = new MasterInterface() {
@Override public boolean isMaster() { return isMaster; }
@Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
@Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
};
context.nodeStateOrHostInfoChangeHandler = this;
context.nodeAddedOrRemovedListener = this;
return context;
}
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
return bundle.deferredActivation()
? nodeInfo.getClusterStateVersionActivationAcked()
: nodeInfo.getClusterStateVersionBundleAcknowledged();
}
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
var bundle = systemStateBroadcaster.getClusterStateBundle();
if (bundle == null) {
return List.of();
}
return cluster.getNodeInfo().stream().
filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
map(NodeInfo::getNode).
collect(Collectors.toList());
}
/**
 * Renders a list as a comma-separated string. If the list has more than
 * {@code limit} elements, only the first {@code limit} are shown, followed by
 * a "(... and N more)" suffix.
 */
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    boolean truncated = list.size() > limit;
    List<E> shown = truncated ? list.subList(0, limit) : list;
    StringBuilder rendered = new StringBuilder();
    for (E element : shown) {
        if (rendered.length() > 0) {
            rendered.append(", ");
        }
        rendered.append(element.toString());
    }
    if (truncated) {
        rendered.append(String.format(" (... and %d more)", list.size() - limit));
    }
    return rendered.toString();
}
/**
 * Builds a human-readable description of the nodes that have not yet converged to
 * at least the given version, capped at the configured max number of nodes to print.
 * Returns an empty string when all nodes have converged.
 */
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
if (nodes.isEmpty()) {
return "";
}
return String.format("the following nodes have not converged to at least version %d: %s",
taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}
/**
 * Drains the task completion queue: tasks whose minimum cluster state version has
 * been ACKed by all distributors are completed successfully, and tasks whose wait
 * deadline has passed are failed with DEADLINE_EXCEEDED. Processing is FIFO and
 * stops at the first task that is neither satisfied nor expired.
 *
 * @return true if at least one task was removed from the queue
 */
private boolean completeSatisfiedVersionDependentTasks() {
int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
long queueSizeBefore = taskCompletionQueue.size();
final long now = timer.getCurrentTimeInMillis();
while (!taskCompletionQueue.isEmpty()) {
VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
if (publishedVersion >= taskCompletion.getMinimumVersion()) {
// Required version is in sync across the cluster; complete successfully.
context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
taskCompletion.getTask().notifyCompleted();
taskCompletionQueue.remove();
} else if (taskCompletion.getDeadlineTimePointMs() <= now) {
// Waited too long; fail the task, including which nodes are lagging.
var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
taskCompletion.getTask().getClass().getName(), details));
taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
taskCompletion.getTask().notifyCompleted();
taskCompletionQueue.remove();
} else {
// Head of queue still pending; later entries remain pending too (FIFO).
break;
}
}
return (taskCompletionQueue.size() != queueSizeBefore);
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState published = stateVersionTracker.getVersionedClusterState();
    if (published.getClusterState() == State.UP) {
        return published;
    }
    // Cluster is not UP: expose node-level info from the latest candidate state,
    // but keep the published version number so consumers see consistent versioning.
    final ClusterState consolidated = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    consolidated.setVersion(published.getVersion());
    return consolidated;
}
/*
 System test observations:
 - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
 - long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
/**
 * Refreshes locally cached cluster information: wanted states and start timestamps
 * (periodically, when not master), slobrok node lookup, outstanding state requests,
 * node timers, and finally the candidate cluster state recomputation.
 *
 * @return true if any of the sub-steps did work this tick
 */
private boolean resyncLocallyCachedState() {
boolean didWork = false;
// Only the master normally writes these; non-masters poll them every 100 cycles.
if ( ! isMaster && cycleCount % 100 == 0) {
didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext));
didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster));
}
// Note: |= (not ||) so every step runs regardless of earlier results.
didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this));
didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this));
didWork |= metricUpdater.forWork(
"watchTimers",
() -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this));
didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired);
// First pass through here after startup/leadership change: sync version from ZooKeeper.
if ( ! isStateGatherer) {
if ( ! isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
stateChangeHandler.setStateChangedFlag();
}
}
isStateGatherer = true;
return didWork;
}
/** Notifies every registered system state listener about a new candidate state bundle. */
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    for (SystemStateListener listener : systemStateListeners) {
        listener.handleNewCandidateState(candidateBundle);
    }
}
/**
 * Returns whether it is OK to broadcast the first cluster state: either the grace
 * period has elapsed, or every node has already reported its state.
 */
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    if (timeNowMs >= firstAllowedStateBroadcast) {
        return true;
    }
    return cluster.allStatesReported();
}
/**
 * Recomputes the candidate cluster state when anything may have changed, and
 * promotes it to a new published, versioned state if it differs enough from the
 * current one (or a newer version was observed in ZooKeeper). Tasks pending state
 * recomputation are then scheduled against the resulting version.
 *
 * @return true if a new cluster state version was published
 */
private boolean recomputeClusterStateIfRequired() {
boolean stateWasChanged = false;
if (mustRecomputeCandidateClusterState()) {
stateChangeHandler.unsetStateChangedFlag();
final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
// Derive per-bucket-space states and a possible feed block from the baseline candidate.
final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
.bucketSpaces(configuredBucketSpaces)
.stateDeriver(createBucketSpaceStateDeriver())
.deferredActivation(options.enableTwoPhaseClusterStateActivation)
.feedBlock(createResourceExhaustionCalculator()
.inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
.deriveAndBuild();
stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
invokeCandidateStateListeners(candidateBundle);
final long timeNowMs = timer.getCurrentTimeInMillis();
// Publish only after the initial broadcast grace period, and only if the
// candidate differs enough or ZooKeeper handed us a newer version.
if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
&& (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
|| stateVersionTracker.hasReceivedNewVersionFromZooKeeper()))
{
final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
stateWasChanged = true;
}
}
/*
 * This works transparently for tasks that end up changing the current cluster state (i.e.
 * requiring a new state to be published) and for those whose changes are no-ops (because
 * the changes they request are already part of the current state). In the former case the
 * tasks will depend on the version that was generated based upon them. In the latter case
 * the tasks will depend on the version that is already published (or in the process of
 * being published).
 */
scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
return stateWasChanged;
}
/**
 * Chooses how per-bucket-space states are derived from the baseline state: with
 * global document types, the default space may be held in maintenance while
 * global merges are pending; otherwise each space is just a clone of the baseline.
 */
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (!options.clusterHasGlobalDocumentTypes) {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
    return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                                                  createDefaultSpaceMaintenanceTransitionConstraint());
}
/**
 * Builds a feed-block calculator from the configured limits, seeded with the
 * feed block (if any) of the latest candidate state bundle for hysteresis.
 */
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    var previousFeedBlock = stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull();
    return new ResourceExhaustionCalculator(options.clusterFeedBlockEnabled,
                                            options.clusterFeedBlockLimit,
                                            previousFeedBlock,
                                            options.clusterFeedBlockNoiseLevel);
}
/** Returns a deriver that simply clones the baseline state for every bucket space. */
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
    return (baselineState, bucketSpace) -> baselineState.clone();
}
/**
 * Builds a constraint that only allows maintenance-mode transitions based on the
 * previously published default-space state (empty state if none was derived yet).
 */
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
    var derivedStates = stateVersionTracker.getVersionedClusterStateBundle().getDerivedBucketSpaceStates();
    AnnotatedClusterState defaultSpaceState =
            derivedStates.getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
    return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(defaultSpaceState.getClusterState());
}
/**
 * Move tasks that are dependent on the most recently generated state being published into
 * a completion queue with a dependency on the provided version argument. Once that version
 * has been ACKed by all distributors in the system, those tasks will be marked as completed.
 */
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    long deadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    for (RemoteClusterControllerTask pendingTask : tasksPendingStateRecompute) {
        context.log(logger, Level.FINEST, () -> String.format("Adding task of type '%s' to be completed at version %d",
                pendingTask.getClass().getName(), completeAtVersion));
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, pendingTask, deadlineTimePointMs));
    }
    tasksPendingStateRecompute.clear();
}
/** Generates a fresh annotated cluster state from current node info, time and options. */
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params generatorParams = ClusterStateGenerator.Params.fromOptions(options);
    generatorParams.currentTimeInMilllis(timer.getCurrentTimeInMillis())
            .cluster(cluster)
            .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(generatorParams);
}
/**
 * Emits one event per semantic difference between the previous and new published
 * bundles, followed by the standard "state applied" events.
 */
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
    deltaEvents.forEach(event -> eventLog.add(event, isMaster));
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}
/**
 * Logs a SYSTEMSTATE event describing the new version and its textual diff from
 * the previous state, plus an extra event if the distribution bit count changed.
 */
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
fromClusterState.getTextualDifference(toClusterState),
timeNowMs), isMaster);
if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"Altering distribution bits in system from "
+ fromClusterState.getDistributionBitCount() + " to " +
toClusterState.getDistributionBitCount(),
timeNowMs), isMaster);
}
}
/**
 * Returns true exactly when a master that has never broadcast a state bundle
 * reaches the point where broadcasting is first allowed.
 */
private boolean atFirstClusterStateSendTimeEdge() {
    return isMaster
            && !systemStateBroadcaster.hasBroadcastedClusterStateBundle()
            && hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}
/** Returns whether anything happened since last tick that requires recomputing the candidate state. */
private boolean mustRecomputeCandidateClusterState() {
    if (stateChangeHandler.stateMayHaveChanged()) {
        return true;
    }
    if (stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()) {
        return true;
    }
    return atFirstClusterStateSendTimeEdge();
}
/**
 * Handles the edge where this node gains or loses fleet controller mastership.
 * On gaining: restores version and state bundle from ZooKeeper, loads wanted
 * states/timestamps, and arms the initial broadcast grace period. On losing:
 * drops all leadership-related state. Statement order here is significant.
 *
 * @return true if a leadership edge was processed this tick
 */
private boolean handleLeadershipEdgeTransitions() {
boolean didWork = false;
if (masterElectionHandler.isMaster()) {
if ( ! isMaster) {
// Just became master: rebuild state from ZooKeeper before flagging mastership.
stateChangeHandler.setStateChangedFlag();
systemStateBroadcaster.resetBroadcastedClusterStateBundle();
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
database.loadStartTimestamps(cluster);
database.loadWantedStates(databaseContext);
context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
+ stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
// Hold off broadcasting until nodes have had a chance to report their states.
long currentTime = timer.getCurrentTimeInMillis();
firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
isMaster = true;
inMasterMoratorium = true;
context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be "
+ options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
didWork = true;
}
// Persist wanted-state edits made while we are master.
if (wantedStateChanged) {
database.saveWantedStates(databaseContext);
wantedStateChanged = false;
}
} else {
dropLeadershipState();
}
metricUpdater.updateMasterState(isMaster);
return didWork;
}
/**
 * Clears all master-only state when leadership is lost (or was never held):
 * logs the event, disarms the first-broadcast time point, and fails every task
 * waiting for a state version ACK, since those versions can no longer be driven
 * to convergence by this node.
 */
private void dropLeadershipState() {
if (isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
firstAllowedStateBroadcast = Long.MAX_VALUE;
failAllVersionDependentTasks();
}
wantedStateChanged = false;
isMaster = false;
inMasterMoratorium = false;
}
/**
 * Transitions the controller into shutdown: stops the run loop, fails all tasks
 * waiting for cluster state convergence (they can never complete now), and wakes
 * any threads blocked on the monitor.
 *
 * Note: removed a spurious {@code @Override} annotation — a private method cannot
 * override anything, so the annotation was a compile error.
 */
private void prepareShutdownEdge() {
    running.set(false);
    failAllVersionDependentTasks();
    synchronized (monitor) { monitor.notifyAll(); }
}
// Adapter exposing this controller's cluster and listener interfaces to the
// database layer. Public because DatabaseHandler callbacks receive it directly.
public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() {
@Override
public ContentCluster getCluster() { return cluster; }
@Override
public FleetController getFleetController() { return FleetController.this; }
@Override
public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
@Override
public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
/**
 * Blocks until the controller has completed at least one full processing cycle
 * started after this call (two cycles if one is currently in progress).
 *
 * @throws IllegalStateException on timeout or if the controller is not running
 */
public void waitForCompleteCycle(long timeoutMS) {
long endTime = System.currentTimeMillis() + timeoutMS;
synchronized (monitor) {
// If a cycle is mid-flight it may have observed stale state, so wait for one more.
long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
waitingForCycle = true;
try{
while (cycleCount < wantedCycle) {
if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
// NOTE(review): InterruptedException is deliberately swallowed here so the
// poll loop keeps going until the deadline; the interrupt flag is not
// restored — confirm callers never rely on interruption of this wait.
try{ monitor.wait(100); } catch (InterruptedException e) {}
}
} finally {
waitingForCycle = false;
}
}
}
/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 *
 * Blocks until at least {@code nodeCount} nodes have ACKed a cluster state bundle
 * of at least the given version, polling every 10 ms.
 *
 * @throws IllegalStateException if the condition is not met within the timeout
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeout;
synchronized (monitor) {
while (true) {
int ackedNodes = 0;
for (NodeInfo node : cluster.getNodeInfo()) {
if (node.getClusterStateVersionBundleAcknowledged() >= version) {
++ackedNodes;
}
}
if (ackedNodes >= nodeCount) {
context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
return;
}
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
}
monitor.wait(10);
}
}
}
/**
 * Blocks until exactly the given numbers of distributors and storage nodes have
 * current (non-outdated) RPC addresses registered in slobrok, polling every 10 ms.
 *
 * @throws IllegalStateException if the counts are not reached within the timeout
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeoutMillis;
synchronized (monitor) {
while (true) {
int distCount = 0, storCount = 0;
for (NodeInfo info : cluster.getNodeInfo()) {
if (!info.isRpcAddressOutdated()) {
if (info.isDistributor()) ++distCount;
else ++storCount;
}
}
// Note: requires exact counts, not "at least".
if (distCount == distNodeCount && storCount == storNodeCount) return;
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
+ " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
+ distCount + " distributors and " + storCount + " storage nodes)");
}
monitor.wait(10);
}
}
}
/** Returns true while the ZooKeeper database connection is open. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }
/** Returns the number of slobrok mirror updates seen so far. */
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
/** Returns the content cluster this controller manages. */
public ContentCluster getCluster() { return cluster; }
/** Returns the logged events for a single node. */
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
/** Returns the controller's event log. */
public EventLog getEventLog() {
return eventLog;
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static final Logger logger = Logger.getLogger(FleetController.class.getName());
// Collaborators injected via the constructor.
private final FleetControllerContext context;
private final Timer timer;
// The timer object doubles as the monitor guarding all mutable controller state.
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
// Event loop thread; created in start().
private Thread runner = null;
private final AtomicBoolean running = new AtomicBoolean(true);
// Live and pending configuration (swapped in under the monitor).
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
// Tick bookkeeping, all guarded by the monitor.
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long lastMetricUpdateCycleCount = 0;
private long nextStateSendTime = 0;
// Id of the controller thread, used by verifyInControllerThread(); null until known.
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
// States published / converged since the last observation, for test inspection.
private final List<ClusterStateBundle> newStates = new ArrayList<>();
private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
// Tasks scheduled from other threads, drained by the event loop.
private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
// Leadership state for this controller instance.
private boolean isMaster = false;
private boolean inMasterMoratorium = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
// Tasks waiting for a state recomputation, then for version ACK (see scheduleVersionDependentTasksForFutureCompletion).
private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();
private Set<String> configuredBucketSpaces = Collections.emptySet();
// Read-only view of controller data for status/diagnostics pages.
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return configGeneration; }
@Override
public ContentCluster getCluster() { return cluster; }
};
/**
 * Wires together all collaborators and registers the status page handlers.
 * Note that {@code timer} is also used as the shared monitor object.
 */
public FleetController(FleetControllerContext context,
Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) {
context.log(logger, Level.INFO, "Created");
this.context = context;
this.timer = timer;
// The timer doubles as the monitor guarding mutable state.
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
// Status page routes: per-node page, health, cluster state, and the index page.
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
// Push the initial options to all collaborators.
propagateOptions();
}
/**
 * Factory that builds a fully wired FleetController with real (production)
 * collaborators — RPC communicator, ZooKeeper-backed database, slobrok lookup —
 * and starts its event thread before returning.
 */
public static FleetController create(FleetControllerOptions options,
StatusPageServerInterface statusPageServer,
MetricReporter metricReporter) throws Exception {
var context = new FleetControllerContextImpl(options);
var timer = new RealTimer();
var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName);
var log = new EventLog(timer, metricUpdater);
var cluster = new ContentCluster(
options.clusterName,
options.nodes,
options.storageDistribution);
var stateGatherer = new NodeStateGatherer(timer, timer, log);
var communicator = new RPCCommunicator(
RPCCommunicator.createRealSupervisor(),
timer,
options.fleetControllerIndex,
options.nodeStateRequestTimeoutMS,
options.nodeStateRequestTimeoutEarliestPercentage,
options.nodeStateRequestTimeoutLatestPercentage,
options.nodeStateRequestRoundTripTimeMaxSeconds);
var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, timer);
var lookUp = new SlobrokClient(timer);
var stateGenerator = new StateChangeHandler(timer, log);
var stateBroadcaster = new SystemStateBroadcaster(timer, timer);
var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
// Note: no RPC server instance is passed here (null).
var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator,
statusPageServer, null, lookUp, database, stateGenerator,
stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
}
/** Starts the controller's event loop on a dedicated thread. */
public void start() {
    Thread eventThread = new Thread(this);
    runner = eventThread;
    eventThread.start();
}
/** Returns the shared monitor object used for synchronization and wait/notify. */
public Object getMonitor() { return monitor; }
/** Returns true while the event loop has not been asked to stop. */
public boolean isRunning() {
return running.get();
}
/** Returns whether this controller currently holds mastership (thread-safe). */
public boolean isMaster() {
synchronized (monitor) {
return isMaster;
}
}
/** Returns the last broadcast cluster state (thread-safe). */
public ClusterState getClusterState() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterState();
}
}
/** Returns the last broadcast cluster state bundle (thread-safe). */
public ClusterStateBundle getClusterStateBundle() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterStateBundle();
}
}
/**
 * Schedules a task for execution in the controller's event loop (thread-safe);
 * the main tick loop drains the queue and runs the task on the controller thread.
 *
 * @param task the task to run in the controller thread
 */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        // Use a message supplier so the string is only built when FINE logging is
        // enabled, consistent with the other FINE/FINEST log sites in this class.
        context.log(logger, Level.FINE, () -> "Scheduled remote task " + task.getClass().getName() + " for execution");
        remoteTasks.add(task);
    }
}
/**
 * Used for unit testing. Registers the listener and immediately replays the
 * current published state (and the last converged state, if any) to it.
 *
 * @throws NullPointerException if no cluster state exists yet (should not happen)
 */
public void addSystemStateListener(SystemStateListener listener) {
systemStateListeners.add(listener);
com.yahoo.vdslib.state.ClusterState state = getSystemState();
if (state == null) {
throw new NullPointerException("Cluster state should never be null at this point");
}
listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
if (convergedState != null) {
listener.handleStateConvergedInCluster(convergedState);
}
}
/** Returns a defensive copy of the current options (thread-safe). */
public FleetControllerOptions getOptions() {
synchronized(monitor) {
return options.clone();
}
}
/**
 * Returns the last reported state for the given node.
 *
 * @throws IllegalStateException if the node is not known in the cluster
 */
public NodeState getReportedNodeState(Node n) {
synchronized(monitor) {
NodeInfo node = cluster.getNodeInfo(n);
if (node == null) {
throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
}
return node.getReportedState();
}
}
/**
 * Returns the wanted state for the given node.
 *
 * @throws IllegalStateException if the node is not known in the cluster —
 *         consistent with getReportedNodeState, instead of an opaque NPE
 */
public NodeState getWantedNodeState(Node n) {
    synchronized(monitor) {
        NodeInfo node = cluster.getNodeInfo(n);
        if (node == null) {
            throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
        }
        return node.getWantedState();
    }
}
/** Returns the current versioned cluster state (thread-safe). */
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
return stateVersionTracker.getVersionedClusterState();
}
}
/** Returns the port the RPC server listens on. */
public int getRpcPort() { return rpcServer.getPort(); }
/**
 * Shuts the controller down: stops and joins the event thread, then closes the
 * database, status page server, RPC server, communicator and node lookup, in
 * that order.
 */
public void shutdown() throws InterruptedException, java.io.IOException {
if (runner != null && isRunning()) {
context.log(logger, Level.INFO, "Joining event thread.");
running.set(false);
synchronized(monitor) { monitor.notifyAll(); }
runner.join();
}
context.log(logger, Level.INFO, "FleetController done shutting down event thread.");
// The shutting-down thread takes over as "controller thread" for the remaining teardown.
controllerThreadId = Thread.currentThread().getId();
database.shutdown(databaseContext);
if (statusPageServer != null) {
statusPageServer.shutdown();
}
if (rpcServer != null) {
rpcServer.shutdown();
}
communicator.shutdown();
nodeLookup.shutdown();
}
/**
 * Stages a new configuration; the event loop picks it up (as nextOptions) and
 * applies it on its own thread. Thread-safe; wakes the event loop.
 */
public void updateOptions(FleetControllerOptions options, long configGeneration) {
var newId = FleetControllerId.fromOptions(options);
synchronized(monitor) {
// Reconfiguration must not change which controller this instance is.
assert newId.equals(context.id());
context.log(logger, Level.INFO, "FleetController has new options");
nextOptions = options.clone();
nextConfigGeneration = configGeneration;
monitor.notifyAll();
}
}
/**
 * Guards internal mutations: throws if called from any thread other than the
 * controller thread, once that thread's id has been recorded.
 */
private void verifyInControllerThread() {
    if (controllerThreadId == null) {
        return; // Controller thread id not recorded yet; nothing to verify.
    }
    if (controllerThreadId != Thread.currentThread().getId()) {
        throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
    }
}
/** Convenience accessor for the baseline cluster state of the latest candidate. */
private ClusterState latestCandidateClusterState() {
return stateVersionTracker.getLatestCandidateState().getClusterState();
}
/** Routes a newly reported node state into the state change handler (controller thread only). */
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
/** Records an operator-set wanted state; flagged so it gets persisted to ZooKeeper. */
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
wantedStateChanged = true;
stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
/** Feeds updated host info into the version tracker, possibly triggering recomputation. */
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
verifyInControllerThread();
triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}
/**
 * Flags a state recomputation if the node's resource exhaustion set changed
 * between its previous and new host info. No-op when feed block is disabled.
 */
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
if (!options.clusterFeedBlockEnabled) {
return;
}
var calc = createResourceExhaustionCalculator();
// Compare exhaustions derived from the old vs. the new host info.
var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
if (!previouslyExhausted.equals(nowExhausted)) {
context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
previouslyExhausted, nowExhausted));
stateChangeHandler.setStateChangedFlag();
}
}
/** A node appeared in slobrok; forward to the state change handler (controller thread only). */
@Override
public void handleNewNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewNode(node);
}
/** A node disappeared from slobrok; forward together with the current versioned state. */
@Override
public void handleMissingNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
/** A node re-registered with a new RPC address. */
@Override
public void handleNewRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewRpcAddress(node);
}
/** A node's previously known RPC address came back. */
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleReturnedRpcAddress(node);
}
/**
 * Reacts to a newly published cluster state bundle: records it for observers,
 * updates metrics, hands it to the broadcaster, and — when master — persists
 * its version and contents to ZooKeeper.
 */
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
verifyInControllerThread();
ClusterState baselineState = stateBundle.getBaselineClusterState();
newStates.add(stateBundle);
metricUpdater.updateClusterStateMetrics(cluster, baselineState,
ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
lastMetricUpdateCycleCount = cycleCount;
systemStateBroadcaster.handleNewClusterStates(stateBundle);
// Only the master owns the authoritative copy in ZooKeeper.
if (isMaster) {
storeClusterStateMetaDataToZooKeeper(stateBundle);
}
}
/**
 * Re-publishes cluster state metrics if no state was published recently, so the
 * metrics stay fresh even when the cluster is quiescent.
 *
 * @return true if metrics were re-published
 */
private boolean maybePublishOldMetrics() {
verifyInControllerThread();
// 300 ticks without a publish is the staleness threshold used here.
if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) {
ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
ClusterState baselineState = stateBundle.getBaselineClusterState();
metricUpdater.updateClusterStateMetrics(cluster, baselineState,
ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
lastMetricUpdateCycleCount = cycleCount;
return true;
} else {
return false;
}
}
/**
 * Persists the published state's version and full bundle to ZooKeeper (master only).
 *
 * @throws RuntimeException if the write is interrupted; the thread's interrupt
 *         flag is restored before rethrowing so callers can still observe it
 */
private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
    try {
        database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
        database.saveLatestClusterStateBundle(databaseContext, stateBundle);
    } catch (InterruptedException e) {
        // Restore the interrupt status so code further up the stack can react to it.
        Thread.currentThread().interrupt();
        throw new RuntimeException("ZooKeeper write interrupted", e);
    }
}
/**
 * This function receives the current state of the master election.
 * The keys in the given map are indices of fleet controllers; the values are
 * the index of the fleet controller that each one wants as master.
 *
 * If more than half the fleet controllers want a node to be master and
 * that node also wants itself as master, that node is the single master.
 * If this condition is not met, there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
verifyInControllerThread();
context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler");
metricUpdater.updateMasterElectionMetrics(data);
masterElectionHandler.handleFleetData(data);
}
/**
 * Called when we can no longer contact the database. Notifies the election
 * handler and, if we were master, drops all leadership state since mastership
 * cannot be maintained without ZooKeeper.
 */
public void lostDatabaseConnection() {
verifyInControllerThread();
boolean wasMaster = isMaster;
masterElectionHandler.lostDatabaseConnection();
if (wasMaster) {
dropLeadershipState();
metricUpdater.updateMasterState(false);
}
}
/**
 * Fails and completes every task waiting for a cluster state version ACK — both
 * tasks still pending state recomputation and tasks already in the completion
 * queue. Used on leadership loss and shutdown, when those versions will never
 * be driven to convergence by this node.
 */
private void failAllVersionDependentTasks() {
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        task.handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.notifyCompleted();
    }
    tasksPendingStateRecompute.clear();
    for (VersionDependentTaskCompletion completion : taskCompletionQueue) {
        completion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        completion.getTask().notifyCompleted();
    }
    taskCompletionQueue.clear();
}
/**
 * Called when all distributors have acked the newest cluster state version.
 * Forwards the event to the state change handler and records the bundle as
 * converged for observers.
 */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException {
Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext);
convergedStates.add(currentBundle);
}
/**
 * Returns true if the given node set differs from the currently configured one,
 * either in membership or in any node's retired flag.
 */
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (newNodes.size() != cluster.getConfiguredNodes().size()) {
        return true;
    }
    if (!cluster.getConfiguredNodes().values().containsAll(newNodes)) {
        return true;
    }
    for (ConfiguredNode candidate : newNodes) {
        boolean retiredNow = cluster.getConfiguredNodes().get(candidate.index()).retired();
        if (candidate.retired() != retiredNow) {
            return true;
        }
    }
    return false;
}
/** This is called when the options field has been set to a new set of options */
private void propagateOptions() {
verifyInControllerThread();
// A changed fleet controller index cannot be applied live; this call exits the process if it changed.
selfTerminateIfConfiguredNodeIndexHasChanged();
if (changesConfiguredNodeSet(options.nodes)) {
// NOTE(review): presumably resets slobrok bookkeeping so the new node set is re-resolved — confirm.
cluster.setSlobrokGenerationCount(0);
}
configuredBucketSpaces = Collections.unmodifiableSet(
Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
.collect(Collectors.toSet()));
stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
communicator.propagateOptions(options);
// Only a slobrok-backed node lookup needs connection specs propagated.
if (nodeLookup instanceof SlobrokClient) {
((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
}
eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
cluster.setPollingFrequency(options.statePollingFrequency);
cluster.setDistribution(options.storageDistribution);
cluster.setNodes(options.nodes);
database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext);
database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext);
stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
stateChangeHandler.reconfigureFromOptions(options);
// Force a cluster state recomputation on the next tick after any reconfig.
stateChangeHandler.setStateChangedFlag();
masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty());
if (rpcServer != null) {
rpcServer.setMasterElectionHandler(masterElectionHandler);
try{
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
} catch (ListenFailedException e) {
context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
} catch (Exception e) {
context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
}
}
if (statusPageServer != null) {
try{
statusPageServer.setPort(options.httpPort);
} catch (Exception e) {
context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
}
}
long currentTime = timer.getCurrentTimeInMillis();
// Never postpone the next broadcast further than the (possibly new) minimum interval allows.
nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
configGeneration = nextConfigGeneration;
nextConfigGeneration = -1;
}
// Exits the whole process if the configured cluster name/index no longer matches this
// controller's identity: a live identity change is not supported.
private void selfTerminateIfConfiguredNodeIndexHasChanged() {
var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex);
if (!newId.equals(context.id())) {
context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". We do not support doing this live; " +
"immediately exiting now to force new configuration");
// Fail queued tasks and wake waiters before terminating.
prepareShutdownEdge();
System.exit(1);
}
}
// Resolves and executes the handler for a status page request. On success the handler's
// response is returned directly; on failure an HTML error page is built here instead.
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
verifyInControllerThread();
StatusPageResponse.ResponseCode responseCode;
String message;
// Extra detail placed in the HTML footer only (not in the visible header message).
final String hiddenMessage;
try {
StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
if (handler == null) {
throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
}
return handler.handle(httpRequest);
} catch (FileNotFoundException e) {
responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
message = e.getMessage();
hiddenMessage = "";
} catch (Exception e) {
responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
message = "Internal Server Error";
// The stack trace goes to the hidden footer and the FINE log, not the visible page body.
hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
}
TimeZone tz = TimeZone.getTimeZone("UTC");
long currentTime = timer.getCurrentTimeInMillis();
StatusPageResponse response = new StatusPageResponse();
StringBuilder content = new StringBuilder();
response.setContentType("text/html");
response.setResponseCode(responseCode);
content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
response.writeHtmlHeader(content, message);
response.writeHtmlFooter(content, hiddenMessage);
response.writeContent(content.toString());
return response;
}
// One iteration of the controller's main loop. All work under the monitor lock; each unit of
// work is wrapped in metricUpdater.forWork() for timing, and didWork shortens the next wait.
public void tick() throws Exception {
synchronized (monitor) {
boolean didWork;
didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext));
didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState);
didWork |= metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions);
stateChangeHandler.setMaster(isMaster);
// isRunning() is rechecked between steps so shutdown aborts the tick promptly.
if ( ! isRunning()) { return; }
didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this));
if ( ! isRunning()) { return; }
// Only the N first election candidates act as state gatherers.
if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
didWork |= resyncLocallyCachedState();
} else {
stepDownAsStateGatherer();
}
if ( ! isRunning()) { return; }
didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses);
if ( ! isRunning()) { return; }
// Broadcasting and convergence checking is master-only work.
if (isMaster) {
didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes);
systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
}
if ( ! isRunning()) { return; }
didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest);
if ( ! isRunning()) { return; }
if (rpcServer != null) {
didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this));
}
if ( ! isRunning()) { return; }
didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask);
didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks);
didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics);
processingCycle = false;
++cycleCount;
long tickStopTime = timer.getCurrentTimeInMillis();
if (tickStopTime >= tickStartTime) {
metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
}
// Wait only briefly when there was work (or a test is waiting on cycle completion).
monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime);
if ( ! isRunning()) { return; }
tickStartTime = timer.getCurrentTimeInMillis();
processingCycle = true;
if (nextOptions != null) {
switchToNewConfig();
}
}
// Listener callbacks run outside the monitor lock.
if (isRunning()) {
propagateNewStatesToListeners();
}
}
/**
 * Polls the master election state via ZooKeeper.
 *
 * @return true if the election handler did any work; false otherwise, including when a
 *         non-fatal exception was caught and logged.
 */
private boolean updateMasterElectionState() {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Restore the interrupt flag before rethrowing so callers further up the stack
        // can still observe that the thread was interrupted.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } catch (Exception e) {
        context.log(logger, Level.WARNING, "Failed to watch master election: " + e);
    }
    return false;
}
/** Clears gathered node states and logs an event if this node was acting as a state gatherer. */
private void stepDownAsStateGatherer() {
    if (!isStateGatherer) {
        return; // Already stepped down; nothing to clear or log.
    }
    cluster.clearStates();
    eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
                                  "This node is no longer a node state gatherer.",
                                  timer.getCurrentTimeInMillis()));
    isStateGatherer = false;
}
// Promotes nextOptions (set by the config thread) to the active options and applies them.
private void switchToNewConfig() {
options = nextOptions;
nextOptions = null;
try {
propagateOptions();
} catch (Exception e) {
// Keep running on the old effective state; the failure is logged for operators.
context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e);
}
}
/**
 * Answers at most one pending status page request.
 *
 * @return true if a request was answered, false if none was pending or no server is configured
 */
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest pendingRequest = statusPageServer.getCurrentHttpRequest();
    if (pendingRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(pendingRequest));
    return true;
}
// Broadcasts a new state bundle (and/or state activations) when allowed to do so.
// Returns true if anything was sent.
private boolean broadcastClusterStateToEligibleNodes() {
// Don't publish until ZooKeeper writes of state metadata have completed.
if (database.hasPendingClusterStateMetaDataStore()) {
context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
return false;
}
boolean sentAny = false;
long currentTime = timer.getCurrentTimeInMillis();
// Broadcast only after the moratorium window has passed (or every node has reported in),
// and no more often than the configured minimum interval.
if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
&& currentTime >= nextStateSendTime)
{
if (inMasterMoratorium) {
context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ?
"Master moratorium complete: all nodes have reported in" :
"Master moratorium complete: timed out waiting for all nodes to report in");
firstAllowedStateBroadcast = currentTime;
inMasterMoratorium = false;
}
sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
if (sentAny) {
// Rate-limit the next bundle broadcast.
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
}
}
// Activations are not rate-limited by the broadcast interval above.
sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
return sentAny;
}
// Drains queued published and converged state bundles to all registered listeners,
// in publication order, while holding the systemStateListeners lock.
private void propagateNewStatesToListeners() {
if ( ! newStates.isEmpty()) {
synchronized (systemStateListeners) {
for (ClusterStateBundle stateBundle : newStates) {
for (SystemStateListener listener : systemStateListeners) {
listener.handleNewPublishedState(stateBundle);
}
}
newStates.clear();
}
}
if ( ! convergedStates.isEmpty()) {
synchronized (systemStateListeners) {
for (ClusterStateBundle stateBundle : convergedStates) {
for (SystemStateListener listener : systemStateListeners) {
listener.handleStateConvergedInCluster(stateBundle);
}
}
convergedStates.clear();
}
}
}
// Executes at most one remote task from the queue. Tasks that depend on a state version
// being ACKed are parked in tasksPendingStateRecompute instead of being completed now.
// Returns true if a task was processed.
private boolean processNextQueuedRemoteTask() {
metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
RemoteClusterControllerTask task = remoteTasks.poll();
if (task == null) {
return false;
}
final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
task.doRemoteFleetControllerTask(taskContext);
if (taskMayBeCompletedImmediately(task)) {
context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
task.notifyCompleted();
} else {
context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
tasksPendingStateRecompute.add(task);
}
return true;
}
// A task must wait for version ACKs only when it has such a dependency, did not fail,
// and we are the master; every other task can be completed right away.
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    boolean mustAwaitVersionAck = task.hasVersionAckDependency() && !task.isFailed() && isMaster;
    return !mustAwaitVersionAck;
}
// Builds the context handed to remote tasks: a snapshot of cluster/state plus callbacks
// into this controller. Note: the local 'context' shadows the controller's Context field.
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
context.cluster = cluster;
context.currentConsolidatedState = consolidatedClusterState();
context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
// Live view of mastership; tasks see current values at call time, not a snapshot.
context.masterInfo = new MasterInterface() {
@Override public boolean isMaster() { return isMaster; }
@Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
@Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
};
context.nodeStateOrHostInfoChangeHandler = this;
context.nodeAddedOrRemovedListener = this;
return context;
}
// With two-phase (deferred) activation the explicit activation ACK is authoritative;
// otherwise the plain bundle acknowledgement is.
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    if (bundle.deferredActivation()) {
        return nodeInfo.getClusterStateVersionActivationAcked();
    }
    return nodeInfo.getClusterStateVersionBundleAcknowledged();
}
/**
 * Lists the nodes whose effective activated state version is still below the given version.
 * Returns an empty list when no state bundle has been broadcast yet.
 */
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var bundle = systemStateBroadcaster.getClusterStateBundle();
    if (bundle == null) {
        return List.of();
    }
    List<Node> laggingNodes = new ArrayList<>();
    for (NodeInfo info : cluster.getNodeInfo()) {
        if (effectiveActivatedStateVersion(info, bundle) < version) {
            laggingNodes.add(info.getNode());
        }
    }
    return laggingNodes;
}
/**
 * Joins the list's elements with ", ", truncating to at most {@code limit} elements and
 * appending a "(... and N more)" suffix when the list is longer than the limit.
 */
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    int overflow = list.size() - limit;
    if (overflow <= 0) {
        return list.stream().map(Object::toString).collect(Collectors.joining(", "));
    }
    String shown = list.subList(0, limit).stream()
            .map(Object::toString)
            .collect(Collectors.joining(", "));
    return String.format("%s (... and %d more)", shown, overflow);
}
// Builds a human-readable summary of nodes that have not yet ACKed the given version,
// truncated per maxDivergentNodesPrintedInTaskErrorMessages. Empty string if all converged.
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
if (nodes.isEmpty()) {
return "";
}
return String.format("the following nodes have not converged to at least version %d: %s",
taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}
// Completes queued version-dependent tasks whose minimum version is now in sync, and fails
// those whose deadline has passed. The queue is ordered, so processing stops at the first
// task that is neither satisfied nor expired. Returns true if any task was completed.
private boolean completeSatisfiedVersionDependentTasks() {
int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
long queueSizeBefore = taskCompletionQueue.size();
final long now = timer.getCurrentTimeInMillis();
while (!taskCompletionQueue.isEmpty()) {
VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
if (publishedVersion >= taskCompletion.getMinimumVersion()) {
context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
taskCompletion.getTask().notifyCompleted();
taskCompletionQueue.remove();
} else if (taskCompletion.getDeadlineTimePointMs() <= now) {
// Deadline exceeded: fail with a diagnostic listing the nodes that lag behind.
var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
taskCompletion.getTask().getClass().getName(), details));
taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
taskCompletion.getTask().notifyCompleted();
taskCompletionQueue.remove();
} else {
break;
}
}
return (taskCompletionQueue.size() != queueSizeBefore);
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is down: take node liveness from the latest candidate state, but keep the
    // published version number so consumers see a consistent version sequence.
    ClusterState consolidated = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    consolidated.setVersion(publishedState.getVersion());
    return consolidated;
}
/*
System test observations:
- a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
- long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
// Refreshes locally cached node/cluster state: periodic ZooKeeper resync for non-masters,
// slobrok lookup, node state polling, timer watching and state recomputation. Also flips
// this node into state-gatherer mode. Returns true if any sub-step did work.
private boolean resyncLocallyCachedState() {
boolean didWork = false;
// Non-masters resync wanted states/start timestamps only every 100th cycle; the master
// loads them on its leadership edge instead (see handleLeadershipEdgeTransitions).
if ( ! isMaster && cycleCount % 100 == 0) {
didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext));
didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster));
}
didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this));
didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this));
didWork |= metricUpdater.forWork(
"watchTimers",
() -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this));
didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired);
// Edge into state-gatherer mode: non-master candidates pick up the latest version from
// ZooKeeper and force a state recomputation.
if ( ! isStateGatherer) {
if ( ! isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
stateChangeHandler.setStateChangedFlag();
}
}
isStateGatherer = true;
return didWork;
}
// Fans the newly computed candidate state bundle out to every registered listener.
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    for (SystemStateListener stateListener : systemStateListeners) {
        stateListener.handleNewCandidateState(candidateBundle);
    }
}
// The first broadcast may happen once the configured wait point is reached, or earlier
// if every configured node has already reported its state.
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    if (timeNowMs >= firstAllowedStateBroadcast) {
        return true;
    }
    return cluster.allStatesReported();
}
// Recomputes the candidate cluster state when flagged, promotes it to a new published
// version if it differs enough (or ZooKeeper handed us a new version), and schedules
// version-dependent tasks. Returns true if a new state version was published.
private boolean recomputeClusterStateIfRequired() {
boolean stateWasChanged = false;
if (mustRecomputeCandidateClusterState()) {
stateChangeHandler.unsetStateChangedFlag();
final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
// Derive per-bucket-space states plus an optional feed block from the baseline candidate.
final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
.bucketSpaces(configuredBucketSpaces)
.stateDeriver(createBucketSpaceStateDeriver())
.deferredActivation(options.enableTwoPhaseClusterStateActivation)
.feedBlock(createResourceExhaustionCalculator()
.inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
.deriveAndBuild();
stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
invokeCandidateStateListeners(candidateBundle);
final long timeNowMs = timer.getCurrentTimeInMillis();
if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
&& (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
|| stateVersionTracker.hasReceivedNewVersionFromZooKeeper()))
{
final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
stateWasChanged = true;
}
}
/*
 * This works transparently for tasks that end up changing the current cluster state (i.e.
 * requiring a new state to be published) and for those whose changes are no-ops (because
 * the changes they request are already part of the current state). In the former case the
 * tasks will depend on the version that was generated based upon them. In the latter case
 * the tasks will depend on the version that is already published (or in the process of
 * being published).
 */
scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
return stateWasChanged;
}
// Chooses how per-bucket-space states are derived: clusters without global document types
// can simply mirror the baseline state for every space.
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (!options.clusterHasGlobalDocumentTypes) {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
    return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                                                 createDefaultSpaceMaintenanceTransitionConstraint());
}
// Creates a calculator configured from the current feed block options; the previously
// derived feed block (if any) is carried over for hysteresis against the noise level.
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    var previousFeedBlock = stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull();
    return new ResourceExhaustionCalculator(options.clusterFeedBlockEnabled,
                                            options.clusterFeedBlockLimit,
                                            previousFeedBlock,
                                            options.clusterFeedBlockNoiseLevel);
}
// Deriver that gives every bucket space an identical (cloned) copy of the baseline state.
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
return (state, space) -> state.clone();
}
// Constraint built from the currently published default-space state, allowing maintenance
// transitions only on up-edges relative to that previously published state.
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
.getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
}
/**
 * Move tasks that are dependent on the most recently generated state being published into
 * a completion queue with a dependency on the provided version argument. Once that version
 * has been ACKed by all distributors in the system, those tasks will be marked as completed.
 */
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
// All tasks scheduled in this pass share the same absolute deadline.
final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
context.log(logger, Level.FINEST, () -> String.format("Adding task of type '%s' to be completed at version %d",
task.getClass().getName(), completeAtVersion));
taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
}
tasksPendingStateRecompute.clear();
}
// Generates a fresh annotated cluster state from the current options, cluster info,
// time, and the lowest distribution bit count observed so far.
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params generatorParams = ClusterStateGenerator.Params.fromOptions(options);
    generatorParams.currentTimeInMilllis(timer.getCurrentTimeInMillis());
    generatorParams.cluster(cluster);
    generatorParams.lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(generatorParams);
}
// Computes the event diff between the previously published and the new state bundle and
// records those events, followed by the generic "state applied" events.
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
final ClusterStateBundle toState,
final long timeNowMs) {
final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
EventDiffCalculator.params()
.cluster(cluster)
.fromState(fromState)
.toState(toState)
.currentTimeMs(timeNowMs)
.maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
for (Event event : deltaEvents) {
eventLog.add(event, isMaster);
}
emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}
// Records a SYSTEMSTATE event for the new version (with a textual diff from the previous
// state), plus a separate event if the distribution bit count changed.
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
fromClusterState.getTextualDifference(toClusterState),
timeNowMs), isMaster);
if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"Altering distribution bits in system from "
+ fromClusterState.getDistributionBitCount() + " to " +
toClusterState.getDistributionBitCount(),
timeNowMs), isMaster);
}
}
// True exactly when this (master) controller is about to send its very first broadcast:
// nothing has been broadcast yet and the first-broadcast time point has been reached.
private boolean atFirstClusterStateSendTimeEdge() {
    boolean mayBeAtEdge = isMaster && !systemStateBroadcaster.hasBroadcastedClusterStateBundle();
    return mayBeAtEdge && hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}
// Recompute when node state may have changed, merge completion per bucket space has
// changed, or we are at the very first broadcast edge after winning mastership.
private boolean mustRecomputeCandidateClusterState() {
    if (stateChangeHandler.stateMayHaveChanged()) {
        return true;
    }
    if (stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()) {
        return true;
    }
    return atFirstClusterStateSendTimeEdge();
}
// Handles becoming or ceasing to be master. On the master edge, reloads all persisted
// state from ZooKeeper and starts the broadcast moratorium; off the edge, drops leadership
// state. Returns true if the master edge was taken.
private boolean handleLeadershipEdgeTransitions() {
boolean didWork = false;
if (masterElectionHandler.isMaster()) {
if ( ! isMaster) {
// Master edge: resync everything from ZooKeeper before acting as master.
stateChangeHandler.setStateChangedFlag();
systemStateBroadcaster.resetBroadcastedClusterStateBundle();
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
database.loadStartTimestamps(cluster);
database.loadWantedStates(databaseContext);
context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
+ stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
long currentTime = timer.getCurrentTimeInMillis();
// Delay the first broadcast (moratorium) to give nodes time to report in.
firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
isMaster = true;
inMasterMoratorium = true;
context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be "
+ options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
didWork = true;
}
// Only the master persists wanted-state changes.
if (wantedStateChanged) {
database.saveWantedStates(databaseContext);
wantedStateChanged = false;
}
} else {
dropLeadershipState();
}
metricUpdater.updateMasterState(isMaster);
return didWork;
}
// Clears all master-only state. Safe to call repeatedly; the event/task cleanup only
// happens on the actual master -> non-master edge.
private void dropLeadershipState() {
if (isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
firstAllowedStateBroadcast = Long.MAX_VALUE;
// Pending version-dependent tasks can never complete without mastership.
failAllVersionDependentTasks();
}
wantedStateChanged = false;
isMaster = false;
inMasterMoratorium = false;
}
/** Marks the controller as no longer running, fails all queued tasks and wakes any waiters. */
// Note: the previous '@Override' annotation was removed — a private method cannot override
// anything, so annotating it with @Override is a compile error.
private void prepareShutdownEdge() {
    running.set(false);
    failAllVersionDependentTasks();
    synchronized (monitor) { monitor.notifyAll(); }
}
// Adapter exposing this controller (and its cluster) to the database layer's callbacks.
public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() {
@Override
public ContentCluster getCluster() { return cluster; }
@Override
public FleetController getFleetController() { return FleetController.this; }
@Override
public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
@Override
public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
// Blocks until at least one full tick cycle has completed, or throws on timeout/shutdown.
// Used by tests to synchronize with the controller thread.
public void waitForCompleteCycle(long timeoutMS) {
long endTime = System.currentTimeMillis() + timeoutMS;
synchronized (monitor) {
// If a cycle is mid-flight we need it plus one more complete cycle; otherwise just the next.
long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
waitingForCycle = true;
try{
while (cycleCount < wantedCycle) {
if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
// NOTE(review): interrupt is deliberately swallowed to keep waiting until the deadline — confirm intended.
try{ monitor.wait(100); } catch (InterruptedException e) {}
}
} finally {
waitingForCycle = false;
}
}
}
/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeout;
synchronized (monitor) {
while (true) {
// Count nodes that have ACKed the wanted state version (or newer).
int ackedNodes = 0;
for (NodeInfo node : cluster.getNodeInfo()) {
if (node.getClusterStateVersionBundleAcknowledged() >= version) {
++ackedNodes;
}
}
if (ackedNodes >= nodeCount) {
context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
return;
}
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
}
// Poll; the controller thread updates node info and notifies the monitor.
monitor.wait(10);
}
}
}
// Test helper: blocks until exactly the given number of distributor and storage nodes have
// current (non-outdated) slobrok RPC addresses, or throws after the timeout.
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeoutMillis;
synchronized (monitor) {
while (true) {
int distCount = 0, storCount = 0;
for (NodeInfo info : cluster.getNodeInfo()) {
if (!info.isRpcAddressOutdated()) {
if (info.isDistributor()) ++distCount;
else ++storCount;
}
}
if (distCount == distNodeCount && storCount == storNodeCount) return;
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
+ " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
+ distCount + " distributors and " + storCount + " storage nodes)");
}
monitor.wait(10);
}
}
}
// True while the ZooKeeper-backed database connection is open.
public boolean hasZookeeperConnection() { return !database.isClosed(); }
// Introspection helpers (primarily for tests and status reporting).
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
public ContentCluster getCluster() { return cluster; }
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
public EventLog getEventLog() {
return eventLog;
}
} |
I think so. Let me also make sure to log at >= INFO when the tick loop exits. This way we know the range of time a FleetController actually does something, which may not be obvious during e.g. reconfig. | public void run() {
controllerThreadId = Thread.currentThread().getId();
// Start and stop of the tick loop are logged at INFO so the active lifetime of this
// controller is visible in logs (e.g. across reconfigurations). This also makes the
// messages consistent with the sibling implementation of run() in this file.
context.log(logger, Level.INFO, "Starting tick loop");
try {
    processingCycle = true;
    while (isRunning()) {
        tick();
    }
    context.log(logger, Level.INFO, "Tick loop stopped");
} catch (InterruptedException e) {
    // Interrupt is the normal external stop signal; log the exception object at INFO.
    context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e);
} catch (Throwable t) {
    t.printStackTrace();
    context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t);
    synchronized (monitor) { running.set(false); }
    // A fatal tick error is unrecoverable; terminate the whole process.
    System.exit(1);
} finally {
    prepareShutdownEdge();
}
} | context.log(logger, Level.INFO, "Starting ticks"); | public void run() {
// Remember the controller thread for verifyInControllerThread() assertions.
controllerThreadId = Thread.currentThread().getId();
context.log(logger, Level.INFO, "Starting tick loop");
try {
processingCycle = true;
while (isRunning()) {
tick();
}
// INFO on exit so the controller's active lifetime is visible in logs (e.g. during reconfig).
context.log(logger, Level.INFO, "Tick loop stopped");
} catch (InterruptedException e) {
context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e);
} catch (Throwable t) {
t.printStackTrace();
context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t);
synchronized (monitor) { running.set(false); }
// A fatal tick error is unrecoverable; terminate the whole process.
System.exit(1);
} finally {
prepareShutdownEdge();
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static final Logger logger = Logger.getLogger(FleetController.class.getName());
// Collaborators wired in through the constructor.
private final Context context;
private final Timer timer;
// Lock/wait object for the tick loop and the test wait helpers (same object as the timer).
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
private Thread runner = null;
private final AtomicBoolean running = new AtomicBoolean(true);
// Active options, and the pending set to switch to at the next cycle boundary.
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long lastMetricUpdateCycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
// Bundles queued for listener callbacks (published / converged), drained outside the monitor lock.
private final List<ClusterStateBundle> newStates = new ArrayList<>();
private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
// Mastership / state-gathering role flags, driven by the election handler.
private boolean isMaster = false;
private boolean inMasterMoratorium = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
// Tasks waiting for a state recomputation, then for version ACKs (in order).
private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();
private Set<String> configuredBucketSpaces = Collections.emptySet();
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
// NOTE(review): returns a constant 0 even though this class maintains a configGeneration field — confirm intended.
public long getConfigGeneration() { return 0; }
@Override
public ContentCluster getCluster() { return cluster; }
};
/**
 * Wires together all collaborators and registers the status page request handlers.
 * {@code propagateOptions()} is invoked last to push the initial options to all
 * sub-components before the event thread is started.
 */
public FleetController(Context context,
                       Timer timer,
                       EventLog eventLog,
                       ContentCluster cluster,
                       NodeStateGatherer nodeStateGatherer,
                       Communicator communicator,
                       StatusPageServerInterface statusPage,
                       RpcServer server,
                       NodeLookup nodeLookup,
                       DatabaseHandler database,
                       StateChangeHandler stateChangeHandler,
                       SystemStateBroadcaster systemStateBroadcaster,
                       MasterElectionHandler masterElectionHandler,
                       MetricUpdater metricUpdater,
                       FleetControllerOptions options) {
    context.log(logger, Level.INFO, "Created");
    this.context = context;
    this.timer = timer;
    // The timer instance doubles as the monitor object guarding controller state.
    this.monitor = timer;
    this.eventLog = eventLog;
    this.options = options;
    this.nodeLookup = nodeLookup;
    this.cluster = cluster;
    this.communicator = communicator;
    this.database = database;
    this.stateGatherer = nodeStateGatherer;
    this.stateChangeHandler = stateChangeHandler;
    this.systemStateBroadcaster = systemStateBroadcaster;
    this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
    this.metricUpdater = metricUpdater;
    this.statusPageServer = statusPage;
    this.rpcServer = server;
    this.masterElectionHandler = masterElectionHandler;
    // Route status page URLs to their handlers; patterns are matched in registration order.
    this.statusRequestRouter.addHandler(
            "^/node=([a-z]+)\\.(\\d+)$",
            new LegacyNodePageRequestHandler(timer, eventLog, cluster));
    this.statusRequestRouter.addHandler(
            "^/state.*",
            new NodeHealthRequestHandler(dataExtractor));
    this.statusRequestRouter.addHandler(
            "^/clusterstate",
            new ClusterStateRequestHandler(stateVersionTracker));
    this.statusRequestRouter.addHandler(
            "^/$",
            new LegacyIndexPageRequestHandler(
                    timer, options.showLocalSystemStatesInEventLog, cluster,
                    masterElectionHandler, stateVersionTracker,
                    eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
    propagateOptions();
}
/**
 * Factory: builds a fully wired FleetController from the given options and
 * starts its event thread before returning it.
 */
public static FleetController create(FleetControllerOptions options,
                                     StatusPageServerInterface statusPageServer,
                                     MetricReporter metricReporter) throws Exception {
    var context = new ContextImpl(options);
    var timer = new RealTimer();
    var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName);
    var log = new EventLog(timer, metricUpdater);
    var cluster = new ContentCluster(
            options.clusterName,
            options.nodes,
            options.storageDistribution);
    var stateGatherer = new NodeStateGatherer(timer, timer, log);
    var communicator = new RPCCommunicator(
            RPCCommunicator.createRealSupervisor(),
            timer,
            options.fleetControllerIndex,
            options.nodeStateRequestTimeoutMS,
            options.nodeStateRequestTimeoutEarliestPercentage,
            options.nodeStateRequestTimeoutLatestPercentage,
            options.nodeStateRequestRoundTripTimeMaxSeconds);
    var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, timer);
    var lookUp = new SlobrokClient(timer);
    var stateGenerator = new StateChangeHandler(timer, log);
    var stateBroadcaster = new SystemStateBroadcaster(timer, timer);
    var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
    // Note: no RPC server is created here (null argument).
    var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator,
                                         statusPageServer, null, lookUp, database, stateGenerator,
                                         stateBroadcaster, masterElectionHandler, metricUpdater, options);
    controller.start();
    return controller;
}
/**
 * Starts the controller's event thread, which executes the tick() loop
 * (this instance serves as the thread's {@link Runnable}).
 */
public void start() {
    // Name the thread so it is identifiable in thread dumps and profilers.
    runner = new Thread(this, "fleetcontroller");
    runner.start();
}
/** Returns the object guarding controller state (the timer instance, per the constructor). */
public Object getMonitor() { return monitor; }
/** Returns true until shutdown() clears the running flag. */
public boolean isRunning() {
    return running.get();
}
/** Returns whether this controller currently considers itself the elected master. */
public boolean isMaster() {
    synchronized (monitor) {
        return isMaster;
    }
}
/** Returns the most recently broadcast baseline cluster state. */
public ClusterState getClusterState() {
    synchronized (monitor) {
        return systemStateBroadcaster.getClusterState();
    }
}
/** Returns the most recently broadcast cluster state bundle. */
public ClusterStateBundle getClusterStateBundle() {
    synchronized (monitor) {
        return systemStateBroadcaster.getClusterStateBundle();
    }
}
/** Queues a remote task for execution by the controller's event thread (see tick()). */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution");
        remoteTasks.add(task);
    }
}
/**
 * Used for unit testing. Registers the listener and immediately replays the
 * currently published state (and the last converged state, if any) to it.
 */
public void addSystemStateListener(SystemStateListener listener) {
    systemStateListeners.add(listener);
    com.yahoo.vdslib.state.ClusterState state = getSystemState();
    if (state == null) {
        throw new NullPointerException("Cluster state should never be null at this point");
    }
    listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
    ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
    if (convergedState != null) {
        listener.handleStateConvergedInCluster(convergedState);
    }
}
/** Returns a defensive copy of the currently active options. */
public FleetControllerOptions getOptions() {
    synchronized(monitor) {
        return options.clone();
    }
}
/**
 * Returns the last state reported by the given node.
 *
 * @throws IllegalStateException if the node is not part of the cluster
 */
public NodeState getReportedNodeState(Node n) {
    synchronized(monitor) {
        NodeInfo node = cluster.getNodeInfo(n);
        if (node == null) {
            throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
        }
        return node.getReportedState();
    }
}
/** Returns the wanted (operator-set) state for the given node. */
public NodeState getWantedNodeState(Node n) {
    synchronized(monitor) {
        return cluster.getNodeInfo(n).getWantedState();
    }
}
/** Returns the currently versioned (published) cluster state. */
public com.yahoo.vdslib.state.ClusterState getSystemState() {
    synchronized(monitor) {
        return stateVersionTracker.getVersionedClusterState();
    }
}
/** Returns the port the RPC server is bound to. */
public int getRpcPort() { return rpcServer.getPort(); }
/**
 * Stops the event thread (if running), then shuts down the database handler,
 * status page server, RPC server, communicator and node lookup, in that order.
 */
public void shutdown() throws InterruptedException, java.io.IOException {
    if (runner != null && isRunning()) {
        context.log(logger, Level.INFO, "Joining event thread.");
        running.set(false);
        synchronized(monitor) { monitor.notifyAll(); }
        runner.join();
    }
    context.log(logger, Level.INFO, "FleetController done shutting down event thread.");
    // Adopt the controller-thread identity so the teardown below passes
    // verifyInControllerThread() in any code it reaches.
    controllerThreadId = Thread.currentThread().getId();
    database.shutdown(databaseContext);
    if (statusPageServer != null) {
        statusPageServer.shutdown();
    }
    if (rpcServer != null) {
        rpcServer.shutdown();
    }
    communicator.shutdown();
    nodeLookup.shutdown();
}
/**
 * Stages a new configuration; the event thread applies it on its next cycle
 * (see switchToNewConfig()). The fleet controller id must remain unchanged.
 */
public void updateOptions(FleetControllerOptions options) {
    var newId = FleetControllerId.fromOptions(options);
    synchronized(monitor) {
        assert newId.equals(context.id());
        context.log(logger, Level.INFO, "FleetController has new options");
        nextOptions = options.clone();
        monitor.notifyAll();
    }
}
// Guards controller-internal state: once a controller thread id has been
// recorded, only that thread may call code protected by this check.
private void verifyInControllerThread() {
    Long expected = controllerThreadId;
    if (expected == null) {
        return; // No thread identity recorded yet; nothing to verify.
    }
    if (Thread.currentThread().getId() != expected) {
        throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
    }
}
/** Returns the cluster state of the latest (not yet published) candidate. */
private ClusterState latestCandidateClusterState() {
    return stateVersionTracker.getLatestCandidateState().getClusterState();
}
// Forwards a node's newly reported state to the state change handler.
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
// Records a wanted-state change and proposes it against the published state.
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    wantedStateChanged = true;
    stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
// Integrates updated host info, possibly triggering a state recomputation if
// resource exhaustion (feed block) status changed.
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
    verifyInControllerThread();
    triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
    stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}
/**
 * Sets the state-changed flag if the node's set of exhausted resources differs
 * between its previous host info and the new host info. No-op when cluster
 * feed blocking is disabled in the options.
 */
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
    if (!options.clusterFeedBlockEnabled) {
        return;
    }
    var calc = createResourceExhaustionCalculator();
    var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
    var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
    if (!previouslyExhausted.equals(nowExhausted)) {
        context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
                previouslyExhausted, nowExhausted));
        stateChangeHandler.setStateChangedFlag();
    }
}
// Delegates discovery of a new node to the state change handler.
@Override
public void handleNewNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewNode(node);
}
// Delegates disappearance of a node (e.g. lost from slobrok) to the state change handler.
@Override
public void handleMissingNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
// Delegates a node registering a new RPC address.
@Override
public void handleNewRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewRpcAddress(node);
}
// Delegates a node returning on a previously known RPC address.
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleReturnedRpcAddress(node);
}
// Invoked when a new versioned cluster state bundle has been produced: updates
// metrics, queues the bundle for listeners and the broadcaster, and (if master)
// persists its metadata to ZooKeeper.
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
    verifyInControllerThread();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    // Queued here; propagated to listeners outside tick()'s monitor section.
    newStates.add(stateBundle);
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    systemStateBroadcaster.handleNewClusterStates(stateBundle);
    if (isMaster) {
        storeClusterStateMetaDataToZooKeeper(stateBundle);
    }
}
/**
 * Re-publishes cluster state metrics when this node is master and no publish
 * has refreshed them recently, so externally observed metrics do not go stale
 * between cluster state changes.
 *
 * @return true if metrics were refreshed, false otherwise
 */
private boolean maybePublishOldMetrics() {
    verifyInControllerThread();
    // Refresh metrics once this many ticks have passed since the last update.
    final long maxCyclesBetweenMetricUpdates = 300;
    if (!isMaster() || cycleCount <= maxCyclesBetweenMetricUpdates + lastMetricUpdateCycleCount) {
        return false;
    }
    ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    return true;
}
/**
 * Persists the versioned cluster state bundle and its version number to
 * ZooKeeper so a newly elected master can resume from the last published state.
 *
 * @throws RuntimeException wrapping InterruptedException if the write is interrupted
 */
private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
    try {
        database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
        database.saveLatestClusterStateBundle(databaseContext, stateBundle);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so code above the rethrow can still observe it.
        Thread.currentThread().interrupt();
        throw new RuntimeException("ZooKeeper write interrupted", e);
    }
}
/**
 * This function receives the current state of the master election.
 * The keys in the given map are indices of fleet controllers; the values are
 * which fleet controller each of them wants to become master.
 *
 * If more than half the fleet controllers want a node to be master and
 * that node also wants itself as master, that node is the single master.
 * If this condition is not met, there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
    verifyInControllerThread();
    context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler");
    metricUpdater.updateMasterElectionMetrics(data);
    masterElectionHandler.handleFleetData(data);
}
/**
 * Called when we can no longer contact the database. If this node was master,
 * leadership state is dropped and the master metric is cleared.
 */
public void lostDatabaseConnection() {
    verifyInControllerThread();
    boolean wasMaster = isMaster;
    masterElectionHandler.lostDatabaseConnection();
    if (wasMaster) {
        dropLeadershipState();
        metricUpdater.updateMasterState(false);
    }
}
// Fails and completes every task waiting on a cluster state version, signalling
// that leadership was lost before the version it depends on could be handled.
private void failAllVersionDependentTasks() {
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        task.handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.notifyCompleted();
    }
    tasksPendingStateRecompute.clear();
    for (VersionDependentTaskCompletion completion : taskCompletionQueue) {
        completion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        completion.getTask().notifyCompleted();
    }
    taskCompletionQueue.clear();
}
/**
 * Called when all distributors have ACKed the newest cluster state version.
 * Notifies the state change handler and queues the bundle for converged-state
 * listeners (drained outside tick()'s monitor section).
 */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException {
    Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
    var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
    stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext);
    convergedStates.add(currentBundle);
}
// Returns true if the new node collection differs from the currently configured
// set, either in membership or in any node's retired flag.
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (newNodes.size() != cluster.getConfiguredNodes().size()
        || !cluster.getConfiguredNodes().values().containsAll(newNodes)) {
        return true;
    }
    // Same membership; check whether any retired flag changed.
    for (ConfiguredNode node : newNodes) {
        boolean currentlyRetired = cluster.getConfiguredNodes().get(node.index()).retired();
        if (node.retired() != currentlyRetired) {
            return true;
        }
    }
    return false;
}
/**
 * This is called when the options field has been set to a new set of options.
 * Pushes the new configuration to all sub-components. If the configured node
 * set changed, the slobrok generation count is reset so the cluster view is
 * re-fetched. Failures to (re)bind the RPC or status servers are logged as
 * warnings but do not abort option propagation.
 */
private void propagateOptions() {
    verifyInControllerThread();
    selfTerminateIfConfiguredNodeIndexHasChanged();
    if (changesConfiguredNodeSet(options.nodes)) {
        cluster.setSlobrokGenerationCount(0);
    }
    configuredBucketSpaces = Collections.unmodifiableSet(
            Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
                  .collect(Collectors.toSet()));
    stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
    communicator.propagateOptions(options);
    if (nodeLookup instanceof SlobrokClient) {
        ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    }
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes);
    database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
    stateChangeHandler.reconfigureFromOptions(options);
    // Force a state recomputation since the new options may affect the generated state.
    stateChangeHandler.setStateChangedFlag();
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty());
    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        try{
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try{
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }
    long currentTime = timer.getCurrentTimeInMillis();
    // Never push the next allowed state-send time further into the future than configured.
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
}
/**
 * Exits the process if the configured fleet controller id no longer matches
 * this controller's id; changing id live is not supported, so a restart with
 * the new configuration is forced via System.exit(1).
 */
private void selfTerminateIfConfiguredNodeIndexHasChanged() {
    var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex);
    if (!newId.equals(context.id())) {
        context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". We do not support doing this live; " +
                    "immediately exiting now to force new configuration");
        prepareShutdownEdge();
        System.exit(1);
    }
}
/**
 * Resolves and invokes the status page handler matching the request. On
 * handler errors an HTML error page is produced instead: 404 for unresolved
 * paths, 500 for unexpected exceptions (stack trace hidden in the footer).
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    final String hiddenMessage;
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
        hiddenMessage = "";
    } catch (Exception e) {
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
        context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
    }
    // Fallthrough: build the error page for the failure captured above.
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}
/**
 * One cycle of the controller event loop. Everything except listener
 * propagation runs while holding the monitor; each sub-step is measured via
 * metricUpdater.forWork, and the accumulated didWork flag determines how long
 * the cycle waits before the next tick. Steps bail out early if the controller
 * is shut down mid-cycle.
 */
public void tick() throws Exception {
    synchronized (monitor) {
        boolean didWork;
        didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext));
        didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState);
        didWork |= metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions);
        stateChangeHandler.setMaster(isMaster);
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this));
        if ( ! isRunning()) { return; }
        // Only the N first-ranked controllers gather node state; others step down.
        if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
            didWork |= resyncLocallyCachedState();
        } else {
            stepDownAsStateGatherer();
        }
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses);
        if ( ! isRunning()) { return; }
        if (isMaster) {
            didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes);
            systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
        }
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest);
        if ( ! isRunning()) { return; }
        if (rpcServer != null) {
            didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this));
        }
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask);
        didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks);
        didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics);
        processingCycle = false;
        ++cycleCount;
        long tickStopTime = timer.getCurrentTimeInMillis();
        if (tickStopTime >= tickStartTime) {
            metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
        }
        // Short wait if work was done (or someone is waiting for a cycle), else the configured idle wait.
        monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime);
        if ( ! isRunning()) { return; }
        tickStartTime = timer.getCurrentTimeInMillis();
        processingCycle = true;
        if (nextOptions != null) {
            switchToNewConfig();
        }
    }
    // Listener callbacks run without holding the monitor.
    if (isRunning()) {
        propagateNewStatesToListeners();
    }
}
/**
 * Polls the master election state via the master election handler.
 *
 * @return true if the handler did any work; false otherwise (including on failure)
 * @throws RuntimeException wrapping InterruptedException if the watch was interrupted
 */
private boolean updateMasterElectionState() {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Preserve the thread's interrupted status for code above the rethrow.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } catch (Exception e) {
        context.log(logger, Level.WARNING, "Failed to watch master election: " + e);
    }
    return false;
}
/**
 * Clears locally gathered node states and logs an event when this node was a
 * state gatherer but no longer ranks high enough to remain one.
 */
private void stepDownAsStateGatherer() {
    if (isStateGatherer) {
        cluster.clearStates();
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    }
    isStateGatherer = false;
}
/**
 * Promotes the staged options (set by updateOptions()) to the active options
 * and propagates them; propagation failures are logged but not rethrown.
 */
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e);
    }
}
// Serves at most one pending HTTP status page request per invocation.
// Returns true if a request was answered, false if there was nothing to do.
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
    if (statusRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
    return true;
}
/**
 * Broadcasts a new cluster state bundle and/or pending state activations when
 * allowed: no pending ZooKeeper stores, the master moratorium time window has
 * passed (or all nodes have reported in), and the minimum time between state
 * broadcasts has elapsed. Returns true if anything was sent.
 */
private boolean broadcastClusterStateToEligibleNodes() {
    if (database.hasPendingClusterStateMetaDataStore()) {
        context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
        return false;
    }
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
        && currentTime >= nextStateSendTime)
    {
        if (inMasterMoratorium) {
            context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ?
                    "Master moratorium complete: all nodes have reported in" :
                    "Master moratorium complete: timed out waiting for all nodes to report in");
            firstAllowedStateBroadcast = currentTime;
            inMasterMoratorium = false;
        }
        sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
                databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
        if (sentAny) {
            // Throttle: earliest time a subsequent state broadcast is allowed.
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    // Activations are not throttled by the broadcast interval.
    sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
    return sentAny;
}
/**
 * Drains the queued published and converged state bundles to all registered
 * listeners. Called from tick() after the monitor has been released; the
 * listener list itself is locked while iterating each queue.
 */
private void propagateNewStatesToListeners() {
    if ( ! newStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : newStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleNewPublishedState(stateBundle);
                }
            }
            newStates.clear();
        }
    }
    if ( ! convergedStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : convergedStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleStateConvergedInCluster(stateBundle);
                }
            }
            convergedStates.clear();
        }
    }
}
/**
 * Executes at most one queued remote task per tick. Tasks that depend on a
 * future cluster state version being ACKed are parked in
 * tasksPendingStateRecompute instead of being completed immediately.
 * Returns true if a task was processed.
 */
private boolean processNextQueuedRemoteTask() {
    metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
    RemoteClusterControllerTask task = remoteTasks.poll();
    if (task == null) {
        return false;
    }
    final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
    context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
    task.doRemoteFleetControllerTask(taskContext);
    if (taskMayBeCompletedImmediately(task)) {
        context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
        task.notifyCompleted();
    } else {
        context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
        tasksPendingStateRecompute.add(task);
    }
    return true;
}
// A task must wait only when it depends on a version ACK, has not failed, and
// this node is master; in every other case it can be completed right away.
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    boolean mustAwaitVersionAck = task.hasVersionAckDependency() && !task.isFailed() && isMaster;
    return !mustAwaitVersionAck;
}
/**
 * Builds the context object remote tasks execute against: current cluster,
 * consolidated and published states, master info, and the controller itself
 * as both state-change and node-membership listener.
 */
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.masterInfo = new MasterInterface() {
        @Override public boolean isMaster() { return isMaster; }
        @Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
        @Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
    };
    context.nodeStateOrHostInfoChangeHandler = this;
    context.nodeAddedOrRemovedListener = this;
    return context;
}
/**
 * Returns the state version a node is effectively at: the activation-ACKed
 * version when the bundle uses deferred (two-phase) activation, otherwise the
 * bundle-ACKed version.
 */
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    return bundle.deferredActivation()
            ? nodeInfo.getClusterStateVersionActivationAcked()
            : nodeInfo.getClusterStateVersionBundleAcknowledged();
}
// Lists nodes whose effective activated state version is still below the given
// version; empty if no state bundle has been broadcast yet.
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var bundle = systemStateBroadcaster.getClusterStateBundle();
    if (bundle == null) {
        return List.of();
    }
    List<Node> lagging = new ArrayList<>();
    for (NodeInfo nodeInfo : cluster.getNodeInfo()) {
        if (effectiveActivatedStateVersion(nodeInfo, bundle) < version) {
            lagging.add(nodeInfo.getNode());
        }
    }
    return lagging;
}
// Renders the list as a comma-separated string, truncated to at most `limit`
// elements with an "(... and N more)" suffix when elements were omitted.
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    String joined = list.stream()
            .limit(limit)
            .map(Object::toString)
            .collect(Collectors.joining(", "));
    int omitted = list.size() - limit;
    return (omitted > 0) ? String.format("%s (... and %d more)", joined, omitted) : joined;
}
/**
 * Builds a human-readable summary of nodes lagging behind the given version,
 * truncated per the configured limit; empty string if no node lags.
 */
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (nodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
                         taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}
/**
 * Completes queued version-dependent tasks whose required cluster state
 * version has been ACKed by all nodes, and fails those whose wait deadline has
 * passed. The queue is processed in order and processing stops at the first
 * task that can neither complete nor time out yet. Returns true if any task
 * left the queue.
 */
private boolean completeSatisfiedVersionDependentTasks() {
    int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
    long queueSizeBefore = taskCompletionQueue.size();
    final long now = timer.getCurrentTimeInMillis();
    while (!taskCompletionQueue.isEmpty()) {
        VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
        if (publishedVersion >= taskCompletion.getMinimumVersion()) {
            context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                    taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
            var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
            context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                    taskCompletion.getTask().getClass().getName(), details));
            taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else {
            break;
        }
    }
    return (taskCompletionQueue.size() != queueSizeBefore);
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster not UP: use the fresher candidate state, but keep the published version number.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}
/*
System test observations:
- a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
- long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
/**
 * Refreshes the locally cached cluster view: periodically re-reads wanted
 * states and start timestamps from the database while not master, polls
 * slobrok, requests node states, runs state timers and recomputes the cluster
 * state if required. Also handles the edge where this node becomes a state
 * gatherer. Returns true if any sub-step did work.
 */
private boolean resyncLocallyCachedState() {
    boolean didWork = false;
    // Not master: periodically re-read wanted states in case the master changed them.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext));
        didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster));
    }
    didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this));
    didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this));
    didWork |= metricUpdater.forWork(
            "watchTimers",
            () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this));
    didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired);
    // Becoming a state gatherer (while not master): resync the version from ZooKeeper.
    if ( ! isStateGatherer) {
        if ( ! isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            stateChangeHandler.setStateChangedFlag();
        }
    }
    isStateGatherer = true;
    return didWork;
}
// Notifies all registered listeners of a newly computed (not yet published) candidate state.
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    for (SystemStateListener listener : systemStateListeners) {
        listener.handleNewCandidateState(candidateBundle);
    }
}
/**
 * True once the moratorium time point for the first state broadcast has passed,
 * or earlier if every configured node has already reported its state.
 */
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
}
/**
 * Recomputes the candidate cluster state bundle when the state-changed flag is
 * set, and promotes it to a new versioned (published) state if it differs
 * enough from the current one or a newer version was seen in ZooKeeper.
 * Always re-schedules pending version-dependent tasks against the current
 * version. Returns true if a new state was published.
 */
private boolean recomputeClusterStateIfRequired() {
    boolean stateWasChanged = false;
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag();
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                .bucketSpaces(configuredBucketSpaces)
                .stateDeriver(createBucketSpaceStateDeriver())
                .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                .feedBlock(createResourceExhaustionCalculator()
                           .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
                .deriveAndBuild();
        stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
        invokeCandidateStateListeners(candidateBundle);
        final long timeNowMs = timer.getCurrentTimeInMillis();
        if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
            && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()))
        {
            final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
            handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
            stateWasChanged = true;
        }
    }
    /*
     * This works transparently for tasks that end up changing the current cluster state (i.e.
     * requiring a new state to be published) and for those whose changes are no-ops (because
     * the changes they request are already part of the current state). In the former case the
     * tasks will depend on the version that was generated based upon them. In the latter case
     * the tasks will depend on the version that is already published (or in the process of
     * being published).
     */
    scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
    return stateWasChanged;
}
// Returns the deriver used to compute per-bucket-space states from the baseline:
// a merge-pending-aware deriver when the cluster has global document types,
// otherwise a plain identity clone of the baseline state.
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (!options.clusterHasGlobalDocumentTypes) {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
    return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                                                 createDefaultSpaceMaintenanceTransitionConstraint());
}
/**
 * Creates a feed-block calculator from the current options and the feed block
 * state of the latest candidate bundle (used for hysteresis/noise filtering).
 */
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    return new ResourceExhaustionCalculator(
            options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
            stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
            options.clusterFeedBlockNoiseLevel);
}
/** Deriver that gives every bucket space an identical clone of the baseline state. */
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
    return (state, space) -> state.clone();
}
// Builds an up-edge constraint for leaving Maintenance, anchored on the previously
// published state of the default bucket space (an empty state if none is published yet).
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
    AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
            .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
    return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
}
/**
 * Move tasks that are dependent on the most recently generated state being published into
 * a completion queue with a dependency on the provided version argument. Once that version
 * has been ACKed by all distributors in the system, those tasks will be marked as completed.
 *
 * @param completeAtVersion cluster state version whose cluster-wide ACK completes the tasks
 */
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    // Tasks may not wait forever for their version to be ACKed; each entry carries an absolute deadline.
    final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        context.log(logger, Level.FINEST, () -> String.format("Adding task of type '%s' to be completed at version %d",
                task.getClass().getName(), completeAtVersion));
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
    }
    tasksPendingStateRecompute.clear();
}
// Generates a fresh annotated cluster state from the current options, node info,
// time, and the lowest distribution bit count observed so far.
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) // (sic) the typo is in the Params API itself
            .cluster(cluster)
            .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}
/**
 * Computes the event diff between the previously published and the new state bundle,
 * logs each resulting event, and then emits the generic "state applied" events for
 * the baseline states.
 */
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
    for (Event event : deltaEvents) {
        eventLog.add(event, isMaster);
    }
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}
/**
 * Logs a SYSTEMSTATE event describing the new version and its textual diff from the
 * previous state, plus a dedicated event when the distribution bit count changed.
 */
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    String stateChangeDescription = "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
            fromClusterState.getTextualDifference(toClusterState);
    eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, stateChangeDescription, timeNowMs), isMaster);

    int previousBits = fromClusterState.getDistributionBitCount();
    int currentBits = toClusterState.getDistributionBitCount();
    if (currentBits != previousBits) {
        String bitChangeDescription = "Altering distribution bits in system from "
                + previousBits + " to " +
                currentBits;
        eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, bitChangeDescription, timeNowMs), isMaster);
    }
}
// True when this node is master, has not yet broadcast any cluster state bundle,
// and the first allowed broadcast time point has passed.
private boolean atFirstClusterStateSendTimeEdge() {
    return isMaster
            && !systemStateBroadcaster.hasBroadcastedClusterStateBundle()
            && hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}
/** Whether a new candidate cluster state needs to be computed this tick. */
private boolean mustRecomputeCandidateClusterState() {
    if (stateChangeHandler.stateMayHaveChanged()) return true;
    if (stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()) return true;
    return atFirstClusterStateSendTimeEdge();
}
/**
 * Detects edges in the master election outcome and updates this controller's role.
 * On the not-master -&gt; master edge it reloads persisted state (latest version, state
 * bundle, start timestamps, wanted states) from ZooKeeper and schedules the earliest
 * allowed first broadcast; any pending wanted-state changes are persisted while master.
 *
 * @return true if work was done (currently only on the became-master edge)
 */
private boolean handleLeadershipEdgeTransitions() {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            // Became master: force a full state recompute and rebroadcast from scratch.
            stateChangeHandler.setStateChangedFlag();
            systemStateBroadcaster.resetBroadcastedClusterStateBundle();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
            database.loadStartTimestamps(cluster);
            database.loadWantedStates(databaseContext);
            context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
            stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                    + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            // Master moratorium: delay the first broadcast so nodes get a chance to report in.
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            isMaster = true;
            inMasterMoratorium = true;
            context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be "
                    + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            didWork = true;
        }
        if (wantedStateChanged) {
            database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        dropLeadershipState();
    }
    metricUpdater.updateMasterState(isMaster);
    return didWork;
}
// Clears all master-related state. The event and task failures are only emitted if we
// actually were master; the flags are unconditionally reset.
private void dropLeadershipState() {
    if (isMaster) {
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
        firstAllowedStateBroadcast = Long.MAX_VALUE;
        failAllVersionDependentTasks();
    }
    wantedStateChanged = false;
    isMaster = false;
    inMasterMoratorium = false;
}
/**
 * Transitions the controller into its shut-down state: stops the run loop, fails all
 * tasks waiting on a published state version, and wakes any thread blocked on the monitor.
 *
 * Note: the stray {@code @Override} annotation was removed -- a private method cannot
 * override anything, so the annotation was a compile error.
 */
private void prepareShutdownEdge() {
    running.set(false);
    failAllVersionDependentTasks();
    synchronized (monitor) { monitor.notifyAll(); }
}
// Adapter exposing this controller's cluster and listener interfaces to the DatabaseHandler.
public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() {
    @Override
    public ContentCluster getCluster() { return cluster; }
    @Override
    public FleetController getFleetController() { return FleetController.this; }
    @Override
    public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
    @Override
    public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
/**
 * Blocks until at least one full processing cycle has completed after this call.
 * If a cycle is currently in progress we must wait for the one after it (current + 2).
 *
 * @throws IllegalStateException on timeout, or if the controller stops running
 */
public void waitForCompleteCycle(long timeoutMS) {
    long endTime = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true; // makes tick() use a 1 ms wait so cycles advance quickly
        try{
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                // NOTE(review): InterruptedException is swallowed and the interrupt flag is not
                // restored -- confirm this is intended for this wait helper.
                try{ monitor.wait(100); } catch (InterruptedException e) {}
            }
        } finally {
            waitingForCycle = false;
        }
    }
}
/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 *
 * @param version   minimum cluster state version nodes must have acknowledged
 * @param nodeCount number of nodes that must have acknowledged it
 * @param timeout   max wait in milliseconds before IllegalStateException is thrown
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            int ackedNodes = 0;
            for (NodeInfo node : cluster.getNodeInfo()) {
                if (node.getClusterStateVersionBundleAcknowledged() >= version) {
                    ++ackedNodes;
                }
            }
            if (ackedNodes >= nodeCount) {
                context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10); // short poll; acks arrive asynchronously
        }
    }
}
/**
 * Blocks until exactly the given number of distributor and storage nodes have a
 * current (non-outdated) RPC address registered in slobrok.
 *
 * @throws IllegalStateException if the exact counts are not reached within timeoutMillis
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                        + distCount + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}
/** Returns true while the database (ZooKeeper) connection is not closed. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }

// Inspection accessors.
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
public ContentCluster getCluster() { return cluster; }
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
public EventLog getEventLog() {
    return eventLog;
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static final Logger logger = Logger.getLogger(FleetController.class.getName());

// Core collaborators, all injected via the constructor.
private final FleetControllerContext context;
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
private Thread runner = null;
private final AtomicBoolean running = new AtomicBoolean(true);
// Active options, plus the next set waiting to be swapped in by the controller thread (see switchToNewConfig()).
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long lastMetricUpdateCycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
// State bundles produced but not yet delivered to listeners (drained in propagateNewStatesToListeners()).
private final List<ClusterStateBundle> newStates = new ArrayList<>();
private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
// Master-election derived state; isMaster is read under 'monitor' by isMaster().
private boolean isMaster = false;
private boolean inMasterMoratorium = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
// Tasks whose completion depends on a given published state version being ACKed cluster-wide.
private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();
private Set<String> configuredBucketSpaces = Collections.emptySet();
// Exposes live options/config generation/cluster to status page handlers.
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
    @Override
    public FleetControllerOptions getOptions() { return options; }
    @Override
    public long getConfigGeneration() { return configGeneration; }
    @Override
    public ContentCluster getCluster() { return cluster; }
};
/**
 * Wires all collaborators together and registers the status page request handlers.
 * Note that the timer object doubles as the monitor lock.
 */
public FleetController(FleetControllerContext context,
                       Timer timer,
                       EventLog eventLog,
                       ContentCluster cluster,
                       NodeStateGatherer nodeStateGatherer,
                       Communicator communicator,
                       StatusPageServerInterface statusPage,
                       RpcServer server,
                       NodeLookup nodeLookup,
                       DatabaseHandler database,
                       StateChangeHandler stateChangeHandler,
                       SystemStateBroadcaster systemStateBroadcaster,
                       MasterElectionHandler masterElectionHandler,
                       MetricUpdater metricUpdater,
                       FleetControllerOptions options) {
    context.log(logger, Level.INFO, "Created");
    this.context = context;
    this.timer = timer;
    this.monitor = timer; // the timer instance is used as the shared lock
    this.eventLog = eventLog;
    this.options = options;
    this.nodeLookup = nodeLookup;
    this.cluster = cluster;
    this.communicator = communicator;
    this.database = database;
    this.stateGatherer = nodeStateGatherer;
    this.stateChangeHandler = stateChangeHandler;
    this.systemStateBroadcaster = systemStateBroadcaster;
    this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
    this.metricUpdater = metricUpdater;
    this.statusPageServer = statusPage;
    this.rpcServer = server;
    this.masterElectionHandler = masterElectionHandler;
    this.statusRequestRouter.addHandler(
            "^/node=([a-z]+)\\.(\\d+)$",
            new LegacyNodePageRequestHandler(timer, eventLog, cluster));
    this.statusRequestRouter.addHandler(
            "^/state.*",
            new NodeHealthRequestHandler(dataExtractor));
    this.statusRequestRouter.addHandler(
            "^/clusterstate",
            new ClusterStateRequestHandler(stateVersionTracker));
    this.statusRequestRouter.addHandler(
            "^/$",
            new LegacyIndexPageRequestHandler(
                    timer, options.showLocalSystemStatesInEventLog, cluster,
                    masterElectionHandler, stateVersionTracker,
                    eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
    propagateOptions();
}
/**
 * Factory assembling a fully wired FleetController with real (production) collaborators
 * and starting its event thread before returning.
 */
public static FleetController create(FleetControllerOptions options,
                                     StatusPageServerInterface statusPageServer,
                                     MetricReporter metricReporter) throws Exception {
    var context = new FleetControllerContextImpl(options);
    var timer = new RealTimer();
    var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName);
    var log = new EventLog(timer, metricUpdater);
    var cluster = new ContentCluster(
            options.clusterName,
            options.nodes,
            options.storageDistribution);
    var stateGatherer = new NodeStateGatherer(timer, timer, log);
    var communicator = new RPCCommunicator(
            RPCCommunicator.createRealSupervisor(),
            timer,
            options.fleetControllerIndex,
            options.nodeStateRequestTimeoutMS,
            options.nodeStateRequestTimeoutEarliestPercentage,
            options.nodeStateRequestTimeoutLatestPercentage,
            options.nodeStateRequestRoundTripTimeMaxSeconds);
    var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, timer);
    var lookUp = new SlobrokClient(timer);
    var stateGenerator = new StateChangeHandler(timer, log);
    var stateBroadcaster = new SystemStateBroadcaster(timer, timer);
    var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
    var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator,
            statusPageServer, null, lookUp, database, stateGenerator,
            stateBroadcaster, masterElectionHandler, metricUpdater, options);
    controller.start();
    return controller;
}
/** Starts the controller's event thread, which executes this Runnable. */
public void start() {
    Thread eventThread = new Thread(this);
    runner = eventThread;
    eventThread.start();
}
/** Returns the shared lock object used for all controller synchronization. */
public Object getMonitor() { return monitor; }

public boolean isRunning() {
    return running.get();
}

/** Thread-safe read of the master flag. */
public boolean isMaster() {
    synchronized (monitor) {
        return isMaster;
    }
}

/** Returns the cluster state held by the system state broadcaster. */
public ClusterState getClusterState() {
    synchronized (monitor) {
        return systemStateBroadcaster.getClusterState();
    }
}

public ClusterStateBundle getClusterStateBundle() {
    synchronized (monitor) {
        return systemStateBroadcaster.getClusterStateBundle();
    }
}
/** Queues a remote task for execution by the controller thread; safe to call from any thread. */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution");
        remoteTasks.add(task);
    }
}
/** Used for unit testing. */
// Registers the listener and immediately replays the current published state (and the
// last converged state, if any) so the listener starts out in sync.
public void addSystemStateListener(SystemStateListener listener) {
    systemStateListeners.add(listener);
    com.yahoo.vdslib.state.ClusterState state = getSystemState();
    if (state == null) {
        throw new NullPointerException("Cluster state should never be null at this point");
    }
    listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
    ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
    if (convergedState != null) {
        listener.handleStateConvergedInCluster(convergedState);
    }
}
/** Returns a defensive copy of the currently active options. */
public FleetControllerOptions getOptions() {
    synchronized(monitor) {
        return options.clone();
    }
}

/**
 * Returns the reported state of the given node.
 *
 * @throws IllegalStateException if the node is not part of the cluster
 */
public NodeState getReportedNodeState(Node n) {
    synchronized(monitor) {
        NodeInfo node = cluster.getNodeInfo(n);
        if (node == null) {
            throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
        }
        return node.getReportedState();
    }
}

// NOTE(review): unlike getReportedNodeState, an unknown node gives NPE here rather than
// IllegalStateException -- confirm whether that asymmetry is intended.
public NodeState getWantedNodeState(Node n) {
    synchronized(monitor) {
        return cluster.getNodeInfo(n).getWantedState();
    }
}

/** Returns the currently versioned cluster state. */
public com.yahoo.vdslib.state.ClusterState getSystemState() {
    synchronized(monitor) {
        return stateVersionTracker.getVersionedClusterState();
    }
}

public int getRpcPort() { return rpcServer.getPort(); }
/**
 * Stops the event thread (if running), then shuts down owned subsystems in order:
 * database, status page server, RPC server, communicator and node lookup.
 * After the join, the calling thread becomes the recorded controller thread so the
 * remaining teardown passes verifyInControllerThread().
 */
public void shutdown() throws InterruptedException, java.io.IOException {
    if (runner != null && isRunning()) {
        context.log(logger, Level.INFO, "Joining event thread.");
        running.set(false);
        synchronized(monitor) { monitor.notifyAll(); } // wake the event thread out of its wait
        runner.join();
    }
    context.log(logger, Level.INFO, "FleetController done shutting down event thread.");
    controllerThreadId = Thread.currentThread().getId();
    database.shutdown(databaseContext);
    if (statusPageServer != null) {
        statusPageServer.shutdown();
    }
    if (rpcServer != null) {
        rpcServer.shutdown();
    }
    communicator.shutdown();
    nodeLookup.shutdown();
}
/**
 * Hands a new option set to the controller thread; it is applied on a later tick via
 * switchToNewConfig(). The fleet controller id must not change (asserted).
 */
public void updateOptions(FleetControllerOptions options, long configGeneration) {
    var newId = FleetControllerId.fromOptions(options);
    synchronized(monitor) {
        assert newId.equals(context.id());
        context.log(logger, Level.INFO, "FleetController has new options");
        nextOptions = options.clone();
        nextConfigGeneration = configGeneration;
        monitor.notifyAll();
    }
}
// Guards against calls from outside the controller thread. Checks are no-ops while
// controllerThreadId is null (in this file it is only assigned during shutdown()).
private void verifyInControllerThread() {
    if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
        throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
    }
}

// The most recently computed (not necessarily published) cluster state.
private ClusterState latestCandidateClusterState() {
    return stateVersionTracker.getLatestCandidateState().getClusterState();
}
// Listener callbacks below all run on the controller thread (verified) and delegate
// to the state change handler / version tracker.

@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}

@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    wantedStateChanged = true; // persisted to ZooKeeper in handleLeadershipEdgeTransitions()
    stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}

@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
    verifyInControllerThread();
    triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
    stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}
// Recomputes the node's feed-block resource exhaustions from the new host info; if the
// exhaustion set changed, flags that a new cluster state must be computed. No-op when
// cluster feed block is disabled in config.
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
    if (!options.clusterFeedBlockEnabled) {
        return;
    }
    var calc = createResourceExhaustionCalculator();
    var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
    var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
    if (!previouslyExhausted.equals(nowExhausted)) {
        context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
                previouslyExhausted, nowExhausted));
        stateChangeHandler.setStateChangedFlag();
    }
}
// Node lifecycle callbacks: thin, thread-verified delegations to the state change handler.

@Override
public void handleNewNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewNode(node);
}

@Override
public void handleMissingNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}

@Override
public void handleNewRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewRpcAddress(node);
}

@Override
public void handleReturnedRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleReturnedRpcAddress(node);
}
/**
 * Accepts a newly published state bundle: queues it for listener propagation, updates
 * cluster state metrics, hands it to the broadcaster, and (when master) persists its
 * meta data to ZooKeeper.
 */
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
    verifyInControllerThread();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    newStates.add(stateBundle); // delivered to listeners outside the lock, later in tick()
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    systemStateBroadcaster.handleNewClusterStates(stateBundle);
    if (isMaster) {
        storeClusterStateMetaDataToZooKeeper(stateBundle);
    }
}
// Re-publishes cluster state metrics when none have been published for more than 300
// cycles (presumably to keep externally reported metrics fresh when the state is stable).
// Returns whether metrics were published.
private boolean maybePublishOldMetrics() {
    verifyInControllerThread();
    if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) {
        ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
        ClusterState baselineState = stateBundle.getBaselineClusterState();
        metricUpdater.updateClusterStateMetrics(cluster, baselineState,
                ResourceUsageStats.calculateFrom(cluster.getNodeInfo(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
        lastMetricUpdateCycleCount = cycleCount;
        return true;
    } else {
        return false;
    }
}
/**
 * Persists the latest published state version and full bundle to ZooKeeper so a
 * future master can recover them (see handleLeadershipEdgeTransitions()).
 *
 * @throws RuntimeException wrapping InterruptedException if a ZooKeeper write is interrupted
 */
private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
    try {
        database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
        database.saveLatestClusterStateBundle(databaseContext, stateBundle);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers further up the stack can observe it
        // (the original code dropped it when wrapping the exception).
        Thread.currentThread().interrupt();
        throw new RuntimeException("ZooKeeper write interrupted", e);
    }
}
/**
 * This function gives data of the current state in master election.
 * The keys in the given map are indices of fleet controllers.
 * The values are what fleetcontroller that fleetcontroller wants to
 * become master.
 *
 * If more than half the fleetcontrollers want a node to be master and
 * that node also wants itself as master, that node is the single master.
 * If this condition is not met, there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
    verifyInControllerThread();
    context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler");
    metricUpdater.updateMasterElectionMetrics(data);
    masterElectionHandler.handleFleetData(data);
}
/**
 * Called when we can no longer contact database.
 * Forwards the event to the election handler; if we were master, leadership state is
 * dropped and the master metric cleared.
 */
public void lostDatabaseConnection() {
    verifyInControllerThread();
    boolean wasMaster = isMaster;
    masterElectionHandler.lostDatabaseConnection();
    if (wasMaster) {
        // Enforce "master gone" edge: clear state and metrics.
        dropLeadershipState();
        metricUpdater.updateMasterState(false);
    }
}
/**
 * Fails every queued version-dependent task with LEADERSHIP_LOST and notifies its
 * completion, emptying both the pre-recompute list and the completion queue.
 */
private void failAllVersionDependentTasks() {
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        task.handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        task.notifyCompleted();
    }
    tasksPendingStateRecompute.clear();
    for (VersionDependentTaskCompletion pendingCompletion : taskCompletionQueue) {
        pendingCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
        pendingCompletion.getTask().notifyCompleted();
    }
    taskCompletionQueue.clear();
}
/** Called when all distributors have acked newest cluster state version. */
// Forwards the convergence to the state change handler and queues the bundle for
// listener notification (drained in propagateNewStatesToListeners()).
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException {
    Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
    var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
    stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext);
    convergedStates.add(currentBundle);
}
/**
 * True when the given node collection differs from the cluster's currently configured
 * nodes -- in size, membership, or any node's retired flag.
 */
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
    if (!cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
    for (ConfiguredNode candidate : newNodes) {
        ConfiguredNode current = cluster.getConfiguredNodes().get(candidate.index());
        if (candidate.retired() != current.retired()) return true;
    }
    return false;
}
/** This is called when the options field has been set to a new set of options */
// Pushes the new configuration into every collaborator. Must run on the controller
// thread. Exits the process if our own node index changed (not supported live).
private void propagateOptions() {
    verifyInControllerThread();
    selfTerminateIfConfiguredNodeIndexHasChanged();
    if (changesConfiguredNodeSet(options.nodes)) {
        // Force slobrok re-registration checks when the node set changes.
        cluster.setSlobrokGenerationCount(0);
    }
    configuredBucketSpaces = Collections.unmodifiableSet(
            Stream.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace())
                    .collect(Collectors.toSet()));
    stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
    communicator.propagateOptions(options);
    if (nodeLookup instanceof SlobrokClient) {
        ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    }
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes);
    database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
    stateChangeHandler.reconfigureFromOptions(options);
    stateChangeHandler.setStateChangedFlag(); // always recompute state after reconfig
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty());
    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        try{
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try{
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }
    long currentTime = timer.getCurrentTimeInMillis();
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
    configGeneration = nextConfigGeneration;
    nextConfigGeneration = -1;
}
// Changing this controller's own cluster name / index live is unsupported: log, run the
// shutdown edge, and exit so the process is restarted with the new configuration.
private void selfTerminateIfConfiguredNodeIndexHasChanged() {
    var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex);
    if (!newId.equals(context.id())) {
        context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". We do not support doing this live; " +
                "immediately exiting now to force new configuration");
        prepareShutdownEdge();
        System.exit(1);
    }
}
/**
 * Serves a status page request by routing it to the matching handler. On routing or
 * handler failure, falls through to build a generic HTML error page (404 for unknown
 * paths, 500 otherwise, with the stack trace hidden in the footer).
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    final String hiddenMessage;
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
        hiddenMessage = "";
    } catch (Exception e) {
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
        context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
    }
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}
/**
 * One iteration of the controller's event loop: ZooKeeper tasks, master election,
 * node state gathering, state broadcasting, status/RPC/remote task processing, then
 * a wait on the monitor before the next cycle. Each step is timed via
 * metricUpdater.forWork(); frequent isRunning() checks allow prompt shutdown.
 * Listener propagation happens last, outside the monitor lock.
 */
public void tick() throws Exception {
    synchronized (monitor) {
        boolean didWork;
        didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext));
        didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState);
        didWork |= metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions);
        stateChangeHandler.setMaster(isMaster);
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this));
        if ( ! isRunning()) { return; }
        // Only the first N controllers in the election order act as state gatherers.
        if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
            didWork |= resyncLocallyCachedState();
        } else {
            stepDownAsStateGatherer();
        }
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses);
        if ( ! isRunning()) { return; }
        if (isMaster) {
            didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes);
            systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
        }
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest);
        if ( ! isRunning()) { return; }
        if (rpcServer != null) {
            didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this));
        }
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask);
        didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks);
        didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics);
        processingCycle = false;
        ++cycleCount;
        long tickStopTime = timer.getCurrentTimeInMillis();
        if (tickStopTime >= tickStartTime) {
            metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
        }
        // Wait only 1 ms if we did work or a test is waiting for a full cycle; otherwise
        // sleep the configured cycle wait time.
        monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime);
        if ( ! isRunning()) { return; }
        tickStartTime = timer.getCurrentTimeInMillis();
        processingCycle = true;
        if (nextOptions != null) {
            switchToNewConfig();
        }
    }
    if (isRunning()) {
        propagateNewStatesToListeners();
    }
}
/**
 * Polls master election state from the database.
 *
 * @return true if the election handler performed work; false on non-interrupt failure
 * @throws RuntimeException wrapping InterruptedException if the poll was interrupted
 */
private boolean updateMasterElectionState() {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Preserve the interrupt status for callers (the original wrap dropped it).
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } catch (Exception e) {
        context.log(logger, Level.WARNING, "Failed to watch master election: " + e);
    }
    return false;
}
// Clears gathered node states and logs an event when this node stops being a state
// gatherer; unconditionally resets the flag.
private void stepDownAsStateGatherer() {
    if (isStateGatherer) {
        cluster.clearStates(); // we no longer have authoritative knowledge of node states
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    }
    isStateGatherer = false;
}
// Applies the pending options handed over by updateOptions(). Runs on the controller
// thread during tick(); failures are logged rather than propagated so the loop survives.
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e);
    }
}
// Answers at most one pending HTTP status page request, if a status page server is
// configured and a request is currently queued.
// Returns true if a request was answered.
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest pendingRequest = statusPageServer.getCurrentHttpRequest();
    if (pendingRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(pendingRequest));
    return true;
}
/**
 * Publishes the current cluster state bundle (and any pending state activations) to nodes,
 * subject to rate limiting and the initial master moratorium.
 *
 * @return true if any state bundle or activation was actually sent
 */
private boolean broadcastClusterStateToEligibleNodes() {
    // Don't publish while earlier cluster state writes to ZooKeeper are still pending.
    if (database.hasPendingClusterStateMetaDataStore()) {
        context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
        return false;
    }
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    // Broadcast only once the moratorium window has passed (or all nodes have reported in),
    // and no earlier than the minimum interval since the previous broadcast.
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
        && currentTime >= nextStateSendTime)
    {
        if (inMasterMoratorium) {
            // currentTime < firstAllowedStateBroadcast means we got here early because
            // every node reported in; otherwise the moratorium simply timed out.
            context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ?
                    "Master moratorium complete: all nodes have reported in" :
                    "Master moratorium complete: timed out waiting for all nodes to report in");
            firstAllowedStateBroadcast = currentTime;
            inMasterMoratorium = false;
        }
        sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
                databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
        if (sentAny) {
            // Rate-limit: earliest time a new state bundle may be broadcast again.
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    // State activations are attempted every call; they are not subject to the send-time limit.
    sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
    return sentAny;
}

/**
 * Notifies registered system state listeners of newly published and newly converged
 * cluster state bundles, then clears the corresponding queues.
 */
private void propagateNewStatesToListeners() {
    // NOTE(review): the emptiness checks happen outside the synchronized blocks;
    // presumably only the controller thread mutates these lists — confirm.
    if ( ! newStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : newStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleNewPublishedState(stateBundle);
                }
            }
            newStates.clear();
        }
    }
    if ( ! convergedStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : convergedStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleStateConvergedInCluster(stateBundle);
                }
            }
            convergedStates.clear();
        }
    }
}
/**
 * Polls and processes the next task submitted from outside the controller thread.
 * Tasks that depend on a cluster state version being acked are parked in
 * {@code tasksPendingStateRecompute} instead of being completed immediately.
 *
 * @return true if a task was taken off the queue and processed
 */
private boolean processNextQueuedRemoteTask() {
    metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
    RemoteClusterControllerTask task = remoteTasks.poll();
    if (task == null) {
        return false;
    }
    final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
    context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
    task.doRemoteFleetControllerTask(taskContext);
    if (taskMayBeCompletedImmediately(task)) {
        context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
        task.notifyCompleted();
    } else {
        context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
        tasksPendingStateRecompute.add(task);
    }
    return true;
}

// A task completes right away unless it must wait for a cluster state version ack;
// failed tasks and tasks handled while not master are never deferred.
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
}

// Builds the context handed to remote tasks: a snapshot of cluster, published state and
// master-election status, plus callbacks into this controller.
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.masterInfo = new MasterInterface() {
        @Override public boolean isMaster() { return isMaster; }
        @Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
        @Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
    };
    context.nodeStateOrHostInfoChangeHandler = this;
    context.nodeAddedOrRemovedListener = this;
    return context;
}

// With deferred (two-phase) activation a node has only applied a version once it has
// acked the activation, not merely the bundle itself.
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    return bundle.deferredActivation()
            ? nodeInfo.getClusterStateVersionActivationAcked()
            : nodeInfo.getClusterStateVersionBundleAcknowledged();
}
// Lists the nodes whose effectively activated cluster state version is still below the
// given version. Empty when no bundle has been broadcast yet.
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var broadcastBundle = systemStateBroadcaster.getClusterStateBundle();
    if (broadcastBundle == null) {
        return List.of();
    }
    return cluster.getNodeInfo().stream()
            .filter(nodeInfo -> effectiveActivatedStateVersion(nodeInfo, broadcastBundle) < version)
            .map(nodeInfo -> nodeInfo.getNode())
            .collect(Collectors.toList());
}
/**
 * Renders the list as a comma-separated string. If the list has more than {@code limit}
 * elements, only the first {@code limit} are printed followed by "(... and N more)".
 */
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    boolean truncated = list.size() > limit;
    List<E> shown = truncated ? list.subList(0, limit) : list;
    String joined = shown.stream().map(E::toString).collect(Collectors.joining(", "));
    return truncated
            ? String.format("%s (... and %d more)", joined, list.size() - limit)
            : joined;
}
// Builds a diagnostic naming (a bounded number of) the nodes that have not yet acked at
// least the given cluster state version; empty string when all have converged.
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (nodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
            taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}

/**
 * Completes deferred tasks whose required cluster state version is now in sync across the
 * cluster, and fails tasks whose wait deadline has passed. The queue is processed from
 * the head; iteration stops at the first task that can neither complete nor time out yet.
 *
 * @return true if at least one task was removed from the completion queue
 */
private boolean completeSatisfiedVersionDependentTasks() {
    int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
    long queueSizeBefore = taskCompletionQueue.size();
    final long now = timer.getCurrentTimeInMillis();
    while (!taskCompletionQueue.isEmpty()) {
        VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
        if (publishedVersion >= taskCompletion.getMinimumVersion()) {
            context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                    taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
            // Deadline exceeded; fail the task with a message naming the lagging nodes.
            var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
            context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                    taskCompletion.getTask().getClass().getName(), details));
            taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else {
            break;
        }
    }
    return (taskCompletionQueue.size() != queueSizeBefore);
}

/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is down: expose the latest candidate state (which still tracks node events)
    // under the already-published version number.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}
/*
 System test observations:
   - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
   - long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
/**
 * Refreshes locally cached cluster state: wanted states and start timestamps (periodically,
 * when not master), slobrok node lookup, node state gathering, timer-driven transitions and,
 * if required, recomputation of the candidate cluster state.
 *
 * @return true if any sub-step reported doing work
 */
private boolean resyncLocallyCachedState() {
    boolean didWork = false;
    // Non-masters only reload wanted states / start timestamps every 100th cycle.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext));
        didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster));
    }
    didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this));
    didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this));
    didWork |= metricUpdater.forWork(
            "watchTimers",
            () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this));
    didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired);
    // First pass after (re)becoming a state gatherer: sync the version from ZooKeeper and
    // force a state recomputation.
    if ( ! isStateGatherer) {
        if ( ! isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            stateChangeHandler.setStateChangedFlag();
        }
    }
    isStateGatherer = true;
    return didWork;
}

// Lets listeners observe each newly computed candidate state bundle.
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
}

// The first state broadcast is allowed once the moratorium time point has passed, or
// earlier if every known node has reported its state.
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
}

/**
 * Recomputes the candidate cluster state when anything relevant may have changed, and
 * promotes it to a new versioned (published) state when it differs enough from the
 * current one. Always (re)schedules pending remote tasks against the current version.
 *
 * @return true if a new versioned state was produced
 */
private boolean recomputeClusterStateIfRequired() {
    boolean stateWasChanged = false;
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag();
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        // Derive per-bucket-space states and a possible feed block from the baseline candidate.
        final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                .bucketSpaces(configuredBucketSpaces)
                .stateDeriver(createBucketSpaceStateDeriver())
                .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                .feedBlock(createResourceExhaustionCalculator()
                           .inferContentClusterFeedBlockOrNull(cluster.getNodeInfo()))
                .deriveAndBuild();
        stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
        invokeCandidateStateListeners(candidateBundle);
        final long timeNowMs = timer.getCurrentTimeInMillis();
        // Publish only after the initial broadcast gate, and only when the candidate differs
        // enough (or ZooKeeper handed us a new version).
        if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
            && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()))
        {
            final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
            handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
            stateWasChanged = true;
        }
    }
    /*
     * This works transparently for tasks that end up changing the current cluster state (i.e.
     * requiring a new state to be published) and for those whose changes are no-ops (because
     * the changes they request are already part of the current state). In the former case the
     * tasks will depend on the version that was generated based upon them. In the latter case
     * the tasks will depend on the version that is already published (or in the process of
     * being published).
     */
    scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
    return stateWasChanged;
}
// Chooses how per-bucket-space states are derived from the baseline: when the cluster has
// global document types a MaintenanceWhenPendingGlobalMerges deriver is used (presumably
// holding nodes in maintenance until global merges finish — confirm against that class);
// otherwise each space simply gets a clone of the baseline state.
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (options.clusterHasGlobalDocumentTypes) {
        return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                createDefaultSpaceMaintenanceTransitionConstraint());
    } else {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
}

// Builds the calculator that infers a cluster-level feed block from configured limits,
// the currently active feed block (if any) and the configured noise level.
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    return new ResourceExhaustionCalculator(
            options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
            stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
            options.clusterFeedBlockNoiseLevel);
}

// Identity deriver: every bucket space uses the baseline state unchanged (cloned so
// per-space instances can be mutated independently).
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
    return (state, space) -> state.clone();
}

// Constraint based on the previously published default-space state (empty state if none
// has been derived yet).
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
    AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
            .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
    return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
}

/**
 * Move tasks that are dependent on the most recently generated state being published into
 * a completion queue with a dependency on the provided version argument. Once that version
 * has been ACKed by all distributors in the system, those tasks will be marked as completed.
 */
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    // All tasks scheduled in this call share one deadline, derived from the configured
    // maximum deferred-task wait time.
    final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        context.log(logger, Level.FINEST, () -> String.format("Adding task of type '%s' to be completed at version %d",
                task.getClass().getName(), completeAtVersion));
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
    }
    tasksPendingStateRecompute.clear();
}

// Generates the baseline annotated cluster state from current options, node info, time
// and the lowest distribution bit count observed so far.
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
          .cluster(cluster)
          .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}
// Computes the event delta between the previously published and the newly published state
// bundle, records those events, then emits the overall "state applied" events.
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
    for (Event event : deltaEvents) {
        eventLog.add(event, isMaster);
    }
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}

// Logs the new state version with its textual diff from the previous state, plus a
// separate event when the distribution bit count changed.
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
            fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from "
                + fromClusterState.getDistributionBitCount() + " to " +
                toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}

// True exactly when this master has not yet broadcast any state bundle but the first
// broadcast gate has been passed, i.e. we are at the first-send edge.
private boolean atFirstClusterStateSendTimeEdge() {
    // Only a master that has broadcast nothing yet can be at the edge.
    if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
        return false;
    }
    return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}

// Candidate recomputation is needed when state may have changed, merge completion state
// for a bucket space changed, or we are at the first-send edge.
private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged()
            || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
            || atFirstClusterStateSendTimeEdge();
}
/**
 * Handles transitions in and out of masterhood. On the up-edge this node loads the last
 * published cluster state bundle and wanted states from ZooKeeper, resets broadcast state
 * and enters the master moratorium; on the down-edge all leadership state is dropped.
 *
 * @return true if an up-edge transition occurred
 */
private boolean handleLeadershipEdgeTransitions() {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            // Up-edge: we just won the master election.
            stateChangeHandler.setStateChangedFlag(); // Force a state recomputation.
            systemStateBroadcaster.resetBroadcastedClusterStateBundle();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
            database.loadStartTimestamps(cluster);
            database.loadWantedStates(databaseContext);
            context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
            stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                    + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            // Hold off state broadcasting for the configured moratorium period so nodes
            // get a chance to report in first.
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            isMaster = true;
            inMasterMoratorium = true;
            context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be "
                    + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            didWork = true;
        }
        // Only the master persists wanted-state changes back to ZooKeeper.
        if (wantedStateChanged) {
            database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        dropLeadershipState();
    }
    metricUpdater.updateMasterState(isMaster);
    return didWork;
}

// Clears all master-only state; safe to call when already not master.
private void dropLeadershipState() {
    if (isMaster) {
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
        firstAllowedStateBroadcast = Long.MAX_VALUE;
        // Version-dependent tasks can never complete without a master; fail them now.
        failAllVersionDependentTasks();
    }
    wantedStateChanged = false;
    isMaster = false;
    inMasterMoratorium = false;
}
// Fix: the original carried an @Override annotation, but a private method cannot override
// anything, which javac rejects as a compile error; the annotation is removed.
/**
 * Marks the controller as no longer running, fails all tasks waiting on version acks,
 * and wakes up any threads waiting on the monitor so they can observe the shutdown.
 */
private void prepareShutdownEdge() {
    running.set(false);
    failAllVersionDependentTasks();
    synchronized (monitor) { monitor.notifyAll(); }
}
// Adapter exposing this controller's cluster and listener interfaces to the database layer.
public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() {
    @Override
    public ContentCluster getCluster() { return cluster; }
    @Override
    public FleetController getFleetController() { return FleetController.this; }
    @Override
    public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
    @Override
    public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};

/**
 * Blocks until at least one full tick cycle started after this call has completed
 * (two cycle increments if a cycle is currently in progress).
 *
 * @throws IllegalStateException if the timeout expires or the controller stops running
 */
public void waitForCompleteCycle(long timeoutMS) {
    long endTime = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        // If a cycle is mid-flight we must see it finish plus one more complete cycle.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try{
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                // NOTE(review): InterruptedException is swallowed here, clearing the
                // interrupt flag; presumably acceptable for this wait helper — confirm.
                try{ monitor.wait(100); } catch (InterruptedException e) {}
            }
        } finally {
            waitingForCycle = false;
        }
    }
}
/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            // Count nodes that have acked a state bundle of at least the wanted version.
            int ackedNodes = 0;
            for (NodeInfo node : cluster.getNodeInfo()) {
                if (node.getClusterStateVersionBundleAcknowledged() >= version) {
                    ++ackedNodes;
                }
            }
            if (ackedNodes >= nodeCount) {
                context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10);
        }
    }
}

// Waits until exactly the given numbers of distributors and storage nodes have a current
// (non-outdated) RPC address registered in slobrok, or throws on timeout.
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                        + distCount + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}
// True while the ZooKeeper database connection is open.
public boolean hasZookeeperConnection() { return !database.isClosed(); }

// Accessors exposing internal components for inspection.
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
public ContentCluster getCluster() { return cluster; }
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
public EventLog getEventLog() {
    return eventLog;
}
} |
Does this still need to be done? | public static ContainerFileSystem create(Path containerStorageRoot, UserNamespace userNamespace, VespaUser vespaUser) {
ContainerFileSystem containerFs = new ContainerFileSystemProvider(containerStorageRoot, userNamespace, vespaUser).getFileSystem(null);
containerFs.createRoot();
return containerFs;
} | containerFs.createRoot(); | public static ContainerFileSystem create(Path containerStorageRoot, UserNamespace userNamespace, VespaUser vespaUser) {
return new ContainerFileSystemProvider(containerStorageRoot, userNamespace, vespaUser).getFileSystem(null);
} | class ContainerFileSystem extends FileSystem {
private final ContainerFileSystemProvider containerFsProvider; // Provider implementing all file system operations.
private final Path containerRootOnHost;                        // Host-side path of the container's root directory.

ContainerFileSystem(ContainerFileSystemProvider containerFsProvider, Path containerRootOnHost) {
    this.containerFsProvider = containerFsProvider;
    this.containerRootOnHost = containerRootOnHost;
}

public Path containerRootOnHost() {
    return containerRootOnHost;
}

// Creates the file system root via the provider.
public void createRoot() {
    provider().createFileSystemRoot();
}

@Override
public ContainerFileSystemProvider provider() {
    return containerFsProvider;
}

// This file system is always reported as open and writable.
@Override
public boolean isOpen() {
    return true;
}

@Override
public boolean isReadOnly() {
    return false;
}

@Override
public String getSeparator() {
    return "/";
}

@Override
public Set<String> supportedFileAttributeViews() {
    return Set.of("basic", "posix", "unix", "owner");
}

@Override
public ContainerUserPrincipalLookupService getUserPrincipalLookupService() {
    return containerFsProvider.userPrincipalLookupService();
}

@Override
public ContainerPath getPath(String first, String... more) {
    return ContainerPath.fromPathInContainer(this, Path.of(first, more));
}

// The remaining FileSystem operations are not supported by this implementation.
@Override
public void close() throws IOException {
    throw new UnsupportedOperationException();
}

@Override
public Iterable<Path> getRootDirectories() {
    throw new UnsupportedOperationException();
}

@Override
public Iterable<FileStore> getFileStores() {
    throw new UnsupportedOperationException();
}

@Override
public PathMatcher getPathMatcher(String syntaxAndPattern) {
    throw new UnsupportedOperationException();
}

@Override
public WatchService newWatchService() {
    throw new UnsupportedOperationException();
}
} | class ContainerFileSystem extends FileSystem {
private final ContainerFileSystemProvider containerFsProvider;
private final Path containerRootOnHost;
ContainerFileSystem(ContainerFileSystemProvider containerFsProvider, Path containerRootOnHost) {
this.containerFsProvider = containerFsProvider;
this.containerRootOnHost = containerRootOnHost;
}
public Path containerRootOnHost() {
return containerRootOnHost;
}
public void createRoot() {
provider().createFileSystemRoot();
}
@Override
public ContainerFileSystemProvider provider() {
return containerFsProvider;
}
@Override
public boolean isOpen() {
return true;
}
@Override
public boolean isReadOnly() {
return false;
}
@Override
public String getSeparator() {
return "/";
}
@Override
public Set<String> supportedFileAttributeViews() {
return Set.of("basic", "posix", "unix", "owner");
}
@Override
public ContainerUserPrincipalLookupService getUserPrincipalLookupService() {
return containerFsProvider.userPrincipalLookupService();
}
@Override
public ContainerPath getPath(String first, String... more) {
return ContainerPath.fromPathInContainer(this, Path.of(first, more));
}
@Override
public void close() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Iterable<Path> getRootDirectories() {
throw new UnsupportedOperationException();
}
@Override
public Iterable<FileStore> getFileStores() {
throw new UnsupportedOperationException();
}
@Override
public PathMatcher getPathMatcher(String syntaxAndPattern) {
throw new UnsupportedOperationException();
}
@Override
public WatchService newWatchService() {
throw new UnsupportedOperationException();
}
} |
It was needed for unit tests, but I guess it makes more sense to special handle unit tests. Moved creation of root to where we create NAC for testing. | public static ContainerFileSystem create(Path containerStorageRoot, UserNamespace userNamespace, VespaUser vespaUser) {
ContainerFileSystem containerFs = new ContainerFileSystemProvider(containerStorageRoot, userNamespace, vespaUser).getFileSystem(null);
containerFs.createRoot();
return containerFs;
} | containerFs.createRoot(); | public static ContainerFileSystem create(Path containerStorageRoot, UserNamespace userNamespace, VespaUser vespaUser) {
return new ContainerFileSystemProvider(containerStorageRoot, userNamespace, vespaUser).getFileSystem(null);
} | class ContainerFileSystem extends FileSystem {
private final ContainerFileSystemProvider containerFsProvider;
private final Path containerRootOnHost;
ContainerFileSystem(ContainerFileSystemProvider containerFsProvider, Path containerRootOnHost) {
this.containerFsProvider = containerFsProvider;
this.containerRootOnHost = containerRootOnHost;
}
public Path containerRootOnHost() {
return containerRootOnHost;
}
public void createRoot() {
provider().createFileSystemRoot();
}
@Override
public ContainerFileSystemProvider provider() {
return containerFsProvider;
}
@Override
public boolean isOpen() {
return true;
}
@Override
public boolean isReadOnly() {
return false;
}
@Override
public String getSeparator() {
return "/";
}
@Override
public Set<String> supportedFileAttributeViews() {
return Set.of("basic", "posix", "unix", "owner");
}
@Override
public ContainerUserPrincipalLookupService getUserPrincipalLookupService() {
return containerFsProvider.userPrincipalLookupService();
}
@Override
public ContainerPath getPath(String first, String... more) {
return ContainerPath.fromPathInContainer(this, Path.of(first, more));
}
@Override
public void close() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Iterable<Path> getRootDirectories() {
throw new UnsupportedOperationException();
}
@Override
public Iterable<FileStore> getFileStores() {
throw new UnsupportedOperationException();
}
@Override
public PathMatcher getPathMatcher(String syntaxAndPattern) {
throw new UnsupportedOperationException();
}
@Override
public WatchService newWatchService() {
throw new UnsupportedOperationException();
}
} | class ContainerFileSystem extends FileSystem {
private final ContainerFileSystemProvider containerFsProvider;
private final Path containerRootOnHost;
ContainerFileSystem(ContainerFileSystemProvider containerFsProvider, Path containerRootOnHost) {
this.containerFsProvider = containerFsProvider;
this.containerRootOnHost = containerRootOnHost;
}
public Path containerRootOnHost() {
return containerRootOnHost;
}
public void createRoot() {
provider().createFileSystemRoot();
}
@Override
public ContainerFileSystemProvider provider() {
return containerFsProvider;
}
@Override
public boolean isOpen() {
return true;
}
@Override
public boolean isReadOnly() {
return false;
}
@Override
public String getSeparator() {
return "/";
}
@Override
public Set<String> supportedFileAttributeViews() {
return Set.of("basic", "posix", "unix", "owner");
}
@Override
public ContainerUserPrincipalLookupService getUserPrincipalLookupService() {
return containerFsProvider.userPrincipalLookupService();
}
@Override
public ContainerPath getPath(String first, String... more) {
return ContainerPath.fromPathInContainer(this, Path.of(first, more));
}
@Override
public void close() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Iterable<Path> getRootDirectories() {
throw new UnsupportedOperationException();
}
@Override
public Iterable<FileStore> getFileStores() {
throw new UnsupportedOperationException();
}
@Override
public PathMatcher getPathMatcher(String syntaxAndPattern) {
throw new UnsupportedOperationException();
}
@Override
public WatchService newWatchService() {
throw new UnsupportedOperationException();
}
} |
Consider using `getTotalBucketCount()` instead of shift expression | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < (1L << distributionBitCount) && (progressToken.getBucketCursor() - sliceId) % slices != 0) {
BucketId bucketId = toBucketId(progressToken.getBucketCursor(), distributionBitCount);
progressToken.addBucket(bucketId, ProgressToken.NULL_BUCKET, ProgressToken.BucketState.BUCKET_ACTIVE);
progressToken.updateProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
ProgressToken.FINISHED_BUCKET);
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
}
} | while (progressToken.getBucketCursor() < (1L << distributionBitCount) && (progressToken.getBucketCursor() - sliceId) % slices != 0) { | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < getTotalBucketCount() && (progressToken.getBucketCursor() % slices) != sliceId) {
progressToken.skipCurrentBucket();
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistCits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
private static BucketId toBucketId(long bucketCursor, int distributionBitCount) {
return new BucketId(ProgressToken.keyToBucketId(ProgressToken.makeNthBucketKey(bucketCursor, distributionBitCount)));
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistCits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(progressToken.getCurrentBucketId(), new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} |
Consider additional parenthesis to avoid any ambiguity with precedence between `%` and `!=` when reading the code | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < (1L << distributionBitCount) && (progressToken.getBucketCursor() - sliceId) % slices != 0) {
BucketId bucketId = toBucketId(progressToken.getBucketCursor(), distributionBitCount);
progressToken.addBucket(bucketId, ProgressToken.NULL_BUCKET, ProgressToken.BucketState.BUCKET_ACTIVE);
progressToken.updateProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
ProgressToken.FINISHED_BUCKET);
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
}
} | while (progressToken.getBucketCursor() < (1L << distributionBitCount) && (progressToken.getBucketCursor() - sliceId) % slices != 0) { | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < getTotalBucketCount() && (progressToken.getBucketCursor() % slices) != sliceId) {
progressToken.skipCurrentBucket();
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistCits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
private static BucketId toBucketId(long bucketCursor, int distributionBitCount) {
return new BucketId(ProgressToken.keyToBucketId(ProgressToken.makeNthBucketKey(bucketCursor, distributionBitCount)));
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
/**
 * Returns whether progress can be reset back to the start of the bucket
 * space without losing any visiting state. This holds only when the number
 * of pending buckets equals the bucket cursor and every tracked bucket is
 * still pending with an all-zero progress id, i.e. nothing has actually
 * been visited yet.
 */
protected boolean isLosslessResetPossible() {
    ProgressToken token = progressToken;
    if (token.getPendingBucketCount() != token.getBucketCursor()) {
        return false;
    }
    // Every known bucket must be untouched: pending state, no sub-bucket progress.
    for (ProgressToken.BucketEntry bucket : token.getBuckets().values()) {
        boolean untouched = bucket.getState() == ProgressToken.BucketState.BUCKET_PENDING
                && bucket.getProgress().getId() == 0;
        if (!untouched) {
            return false;
        }
    }
    return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistBits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
    boolean maybeInconsistent = true;
    long bucketsSplit = 0, bucketsMerged = 0;
    long pendingBefore = progressToken.getPendingBucketCount();
    ProgressToken p = progressToken;
    if (isLosslessResetPossible()) {
        // Nothing has actually been visited yet, so instead of splitting or
        // merging, drop all tracked buckets and restart the cursor from 0.
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, "At start of bucket space and all " +
                    "buckets have no progress; doing a lossless reset " +
                    "instead of splitting/merging");
        }
        assert(p.getActiveBucketCount() == 0);
        p.clearAllBuckets();
        p.setBucketCursor(0);
        // Re-position the cursor on the first bucket belonging to this slice.
        skipToSlice();
        return;
    }
    // Each split/merge adjusts a bucket by a single bit level (see the +1/-1
    // checks below), so buckets more than one bit off the target need further
    // passes; maybeInconsistent flags that another iteration is required.
    while (maybeInconsistent) {
        BucketId lastMergedBucket = null;
        maybeInconsistent = false;
        // Iterate over a shallow copy, since splitting/merging mutates the
        // progress token's bucket map while we walk it.
        TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
                = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
        for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
                : buckets.entrySet()) {
            assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
            BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
            if (pending.getUsedBits() < targetDistBits) {
                // Still more than one bit below target even after this split.
                if (pending.getUsedBits() + 1 < targetDistBits) {
                    maybeInconsistent = true;
                }
                p.splitPendingBucket(pending);
                ++bucketsSplit;
            }
        }
        // Refresh the snapshot; the split pass above may have altered the map.
        buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
        for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
                : buckets.entrySet()) {
            assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
            BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
            if (pending.getUsedBits() > targetDistBits) {
                if (lastMergedBucket != null) {
                    // The right sibling of an already-merged bucket was covered
                    // by that merge; skip it so it is not merged twice.
                    BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
                            lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
                    if (pending.equals(rightCheck)) {
                        if (log.isLoggable(Level.FINEST)) {
                            log.log(Level.FINEST, "Skipped " + pending +
                                    ", as it was right sibling of " + lastMergedBucket);
                        }
                        continue;
                    }
                }
                // Still more than one bit above target even after this merge.
                if (pending.getUsedBits() - 1 > targetDistBits) {
                    maybeInconsistent = true;
                }
                p.mergePendingBucket(pending);
                ++bucketsMerged;
                lastMergedBucket = pending;
            }
        }
    }
    if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
        log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
                "distribution bits; performed " + bucketsSplit + " split ops and " +
                bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
                p.getPendingBucketCount());
    }
}
/**
 * Advances the bucket cursor past every tracked bucket at the current
 * distribution bit level, so the cursor never points at (or before) a
 * bucket that is already present in the progress token.
 */
private void correctTruncatedBucketCursor() {
    for (ProgressToken.BucketKeyWrapper key : progressToken.getBuckets().keySet()) {
        BucketId bucket = key.toBucketId();
        // Only buckets at the current bit level participate in the cursor range.
        if (bucket.getUsedBits() != distributionBitCount) {
            continue;
        }
        // Recover the linear cursor index from the reversed bucket key.
        long index = key.getKey() >>> (64 - bucket.getUsedBits());
        if (index >= progressToken.getBucketCursor()) {
            progressToken.setBucketCursor(index + 1);
        }
    }
    if (log.isLoggable(Level.FINEST)) {
        log.log(Level.FINEST, "New range bucket cursor is " +
                progressToken.getBucketCursor());
    }
}
/**
 * Returns whether this slice still has buckets left to hand out. With more
 * than one distribution bit, the cursor is first projected forward to the
 * next bucket index belonging to this slice before comparing against the
 * total bucket count.
 */
public boolean hasNext() {
    long cursor = progressToken.getBucketCursor();
    long next = (distributionBitCount == 1)
            ? cursor
            : cursor + Math.floorMod(sliceId - cursor, slices);
    return next < getTotalBucketCount();
}
public boolean shouldYield() {
    // Yield while active buckets are draining after a distribution bit
    // change; set in setDistributionBitCount, cleared again in update().
    return flushActive;
}
public boolean visitsAllBuckets() {
    // A distribution range source always covers the entire bucket space.
    return true;
}
public long getTotalBucketCount() {
    // The full bucket space holds 2^distributionBitCount super-buckets.
    return 1L << distributionBitCount;
}
public BucketProgress getNext() {
    assert(hasNext()) : "getNext() called with hasNext() == false";
    // Hand out the bucket under the cursor, with no sub-bucket progress yet.
    BucketProgress progress = new BucketProgress(progressToken.getCurrentBucketId(), new BucketId());
    progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
    // Move the cursor past buckets that belong to other slices, if any.
    skipToSlice();
    return progress;
}
public int getDistributionBitCount() {
    // Distribution bit count this source currently operates with.
    return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
                                    ProgressToken progress)
{
    this.distributionBitCount = distributionBitCount;
    if (progressToken.getActiveBucketCount() > 0) {
        // Cannot reshape progress while buckets are in flight; mark the token
        // inconsistent and let update() re-invoke us once all active buckets
        // have reported back.
        flushActive = true;
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
                    "correction until all " + progress.getActiveBucketCount() +
                    " active buckets have been updated");
        }
        progressToken.setInconsistentState(true);
    } else {
        int delta = distributionBitCount - progressToken.getDistributionBitCount();
        // Bring all pending buckets to the new bit level before rescaling.
        correctInconsistentPending(distributionBitCount);
        if (delta > 0) {
            if (log.isLoggable(Level.FINE)) {
                log.log(Level.FINE, "Increasing distribution bits for full bucket " +
                        "space range source from " + progressToken.getDistributionBitCount() + " to " +
                        distributionBitCount);
            }
            // Each old bucket index maps onto 2^delta new indices, so scale
            // both the finished count and the cursor up accordingly.
            progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
            progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
        } else if (delta < 0) {
            if (log.isLoggable(Level.FINE)) {
                log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
                        "space range source from " + progressToken.getDistributionBitCount() +
                        " to " + distributionBitCount + " bits");
            }
            // Fewer bits: 2^-delta old indices collapse into one new index.
            progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
            progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
        }
        progressToken.setTotalBucketCount(1L << distributionBitCount);
        progressToken.setDistributionBitCount(distributionBitCount);
        // The rescaled cursor may lag behind already-tracked buckets; fix up.
        correctTruncatedBucketCursor();
        progressToken.setInconsistentState(false);
    }
}
public void update(BucketId superbucket, BucketId progress,
                   ProgressToken token) {
    // Record the reported (sub-)progress for this super-bucket first.
    progressToken.updateProgress(superbucket, progress);
    if (superbucket.getUsedBits() != distributionBitCount) {
        // The bucket was handed out under a previous distribution bit count;
        // correction is deferred until no buckets remain active (below).
        if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
            assert(flushActive);
            if (log.isLoggable(Level.FINE)) {
                log.log(Level.FINE, "Received non-finished bucket " +
                        superbucket + " with wrong distribution bit count (" +
                        superbucket.getUsedBits() + "). Waiting to correct " +
                        "until all active are done");
            }
        } else {
            if (log.isLoggable(Level.FINE)) {
                log.log(Level.FINE, "Received finished bucket " +
                        superbucket + " with wrong distribution bit count (" +
                        superbucket.getUsedBits() + "). Waiting to correct " +
                        "until all active are done");
            }
        }
    }
    if (progressToken.getActiveBucketCount() == 0) {
        if (flushActive) {
            // All in-flight buckets have now reported back; re-run the
            // deferred distribution bit correction.
            if (log.isLoggable(Level.FINE)) {
                log.log(Level.FINE, "All active buckets flushed, " +
                        "correcting progress token and continuing normal operation");
            }
            setDistributionBitCount(distributionBitCount, progressToken);
            assert(progressToken.getDistributionBitCount() == distributionBitCount);
        }
        flushActive = false;
        // Re-derive the finished count from cursor minus still-pending buckets.
        if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
            progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
                    progressToken.getPendingBucketCount());
        }
    }
}
} |
These steps could, if needed, probably be squeezed down into a `ProgressToken.markCurrentBucketAsFinished()` (or similarly named) function which just modifies the finished bucket count instead of doing this indirectly via `addBucket`/`updateProgress` | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < (1L << distributionBitCount) && (progressToken.getBucketCursor() - sliceId) % slices != 0) {
BucketId bucketId = toBucketId(progressToken.getBucketCursor(), distributionBitCount);
progressToken.addBucket(bucketId, ProgressToken.NULL_BUCKET, ProgressToken.BucketState.BUCKET_ACTIVE);
progressToken.updateProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
ProgressToken.FINISHED_BUCKET);
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
}
} | ProgressToken.FINISHED_BUCKET); | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < getTotalBucketCount() && (progressToken.getBucketCursor() % slices) != sliceId) {
progressToken.skipCurrentBucket();
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistBits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
private static BucketId toBucketId(long bucketCursor, int distributionBitCount) {
return new BucketId(ProgressToken.keyToBucketId(ProgressToken.makeNthBucketKey(bucketCursor, distributionBitCount)));
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistBits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(progressToken.getCurrentBucketId(), new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} |
Also consider slightly simplifying subexpression to `((progressToken.getBucketCursor() % slices) != sliceId)` if it makes sense to do so | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < (1L << distributionBitCount) && (progressToken.getBucketCursor() - sliceId) % slices != 0) {
BucketId bucketId = toBucketId(progressToken.getBucketCursor(), distributionBitCount);
progressToken.addBucket(bucketId, ProgressToken.NULL_BUCKET, ProgressToken.BucketState.BUCKET_ACTIVE);
progressToken.updateProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
ProgressToken.FINISHED_BUCKET);
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
}
} | while (progressToken.getBucketCursor() < (1L << distributionBitCount) && (progressToken.getBucketCursor() - sliceId) % slices != 0) { | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < getTotalBucketCount() && (progressToken.getBucketCursor() % slices) != sliceId) {
progressToken.skipCurrentBucket();
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistCits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
private static BucketId toBucketId(long bucketCursor, int distributionBitCount) {
return new BucketId(ProgressToken.keyToBucketId(ProgressToken.makeNthBucketKey(bucketCursor, distributionBitCount)));
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistCits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(progressToken.getCurrentBucketId(), new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} |
What's the functional difference between `floorMod` and `(sliceId - nextBucket) % slices` for this use case? | public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
} | nextBucket += Math.floorMod(sliceId - nextBucket, slices); | public boolean hasNext();
public boolean shouldYield();
public boolean visitsAllBuckets();
public BucketProgress getNext();
public long getTotalBucketCount();
public int getDistributionBitCount();
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress);
public void update(BucketId superbucket, BucketId progress,
ProgressToken token);
}
/**
* Provides a bucket source that encompasses the entire range available
* through a given value of distribution bits
*/
protected static class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistCits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(progressToken.getCurrentBucketId(), new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < getTotalBucketCount() && (progressToken.getBucketCursor() % slices) != sliceId) {
progressToken.skipCurrentBucket();
}
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} | class BucketProgress {
private BucketId superbucket;
private BucketId progress;
public BucketProgress(BucketId superbucket, BucketId progress) {
this.superbucket = superbucket;
this.progress = progress;
}
public BucketId getProgress() {
return progress;
}
public BucketId getSuperbucket() {
return superbucket;
}
} | class BucketProgress {
private BucketId superbucket;
private BucketId progress;
public BucketProgress(BucketId superbucket, BucketId progress) {
this.superbucket = superbucket;
this.progress = progress;
}
public BucketId getProgress() {
return progress;
}
public BucketId getSuperbucket() {
return superbucket;
}
} |
Guarantees positivity. | public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
} | nextBucket += Math.floorMod(sliceId - nextBucket, slices); | public boolean hasNext();
public boolean shouldYield();
public boolean visitsAllBuckets();
public BucketProgress getNext();
public long getTotalBucketCount();
public int getDistributionBitCount();
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress);
public void update(BucketId superbucket, BucketId progress,
ProgressToken token);
}
/**
* Provides a bucket source that encompasses the entire range available
* through a given value of distribution bits
*/
protected static class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistCits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(progressToken.getCurrentBucketId(), new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < getTotalBucketCount() && (progressToken.getBucketCursor() % slices) != sliceId) {
progressToken.skipCurrentBucket();
}
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} | class BucketProgress {
private BucketId superbucket;
private BucketId progress;
public BucketProgress(BucketId superbucket, BucketId progress) {
this.superbucket = superbucket;
this.progress = progress;
}
public BucketId getProgress() {
return progress;
}
public BucketId getSuperbucket() {
return superbucket;
}
} | class BucketProgress {
private BucketId superbucket;
private BucketId progress;
public BucketProgress(BucketId superbucket, BucketId progress) {
this.superbucket = superbucket;
this.progress = progress;
}
public BucketId getProgress() {
return progress;
}
public BucketId getSuperbucket() {
return superbucket;
}
} |
Good idea. Reading through it, it seems like that's the only thing that happens, in addition to verifying the bucket isn't already added to the set of tracked buckets. | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < (1L << distributionBitCount) && (progressToken.getBucketCursor() - sliceId) % slices != 0) {
BucketId bucketId = toBucketId(progressToken.getBucketCursor(), distributionBitCount);
progressToken.addBucket(bucketId, ProgressToken.NULL_BUCKET, ProgressToken.BucketState.BUCKET_ACTIVE);
progressToken.updateProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
ProgressToken.FINISHED_BUCKET);
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
}
} | ProgressToken.FINISHED_BUCKET); | private void skipToSlice() {
if (distributionBitCount == 1)
return;
while (progressToken.getBucketCursor() < getTotalBucketCount() && (progressToken.getBucketCursor() % slices) != sliceId) {
progressToken.skipCurrentBucket();
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistCits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(toBucketId(progressToken.getBucketCursor(), distributionBitCount),
new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
private static BucketId toBucketId(long bucketCursor, int distributionBitCount) {
return new BucketId(ProgressToken.keyToBucketId(ProgressToken.makeNthBucketKey(bucketCursor, distributionBitCount)));
}
public int getDistributionBitCount() {
return distributionBitCount;
}
public void setDistributionBitCount(int distributionBitCount,
ProgressToken progress)
{
this.distributionBitCount = distributionBitCount;
if (progressToken.getActiveBucketCount() > 0) {
flushActive = true;
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
"correction until all " + progress.getActiveBucketCount() +
" active buckets have been updated");
}
progressToken.setInconsistentState(true);
} else {
int delta = distributionBitCount - progressToken.getDistributionBitCount();
correctInconsistentPending(distributionBitCount);
if (delta > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Increasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() + " to " +
distributionBitCount);
}
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
} else if (delta < 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
"space range source from " + progressToken.getDistributionBitCount() +
" to " + distributionBitCount + " bits");
}
progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
}
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
correctTruncatedBucketCursor();
progressToken.setInconsistentState(false);
}
}
public void update(BucketId superbucket, BucketId progress,
ProgressToken token) {
progressToken.updateProgress(superbucket, progress);
if (superbucket.getUsedBits() != distributionBitCount) {
if (!progress.equals(ProgressToken.FINISHED_BUCKET)) {
assert(flushActive);
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received non-finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
} else {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Received finished bucket " +
superbucket + " with wrong distribution bit count (" +
superbucket.getUsedBits() + "). Waiting to correct " +
"until all active are done");
}
}
}
if (progressToken.getActiveBucketCount() == 0) {
if (flushActive) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "All active buckets flushed, " +
"correcting progress token and continuing normal operation");
}
setDistributionBitCount(distributionBitCount, progressToken);
assert(progressToken.getDistributionBitCount() == distributionBitCount);
}
flushActive = false;
if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
}
}
}
} | class DistributionRangeBucketSource implements BucketSource {
private boolean flushActive = false;
private int distributionBitCount;
private final int slices;
private final int sliceId;
private ProgressToken progressToken;
public DistributionRangeBucketSource(int distributionBitCount,
ProgressToken progress,
int slices, int sliceId) {
if (slices < 1) {
throw new IllegalArgumentException("slices must be positive, but was " + slices);
}
if (sliceId < 0 || sliceId >= slices) {
throw new IllegalArgumentException("sliceId must be in [0, " + slices + "), but was " + sliceId);
}
this.slices = slices;
this.sliceId = sliceId;
progressToken = progress;
if (progressToken.getTotalBucketCount() == 0) {
assert(progressToken.isEmpty()) : "inconsistent progress state";
progressToken.setTotalBucketCount(1L << distributionBitCount);
progressToken.setDistributionBitCount(distributionBitCount);
progressToken.setBucketCursor(0);
progressToken.setFinishedBucketCount(0);
this.distributionBitCount = distributionBitCount;
}
else {
this.distributionBitCount = progressToken.getDistributionBitCount();
if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) {
throw new IllegalArgumentException("Total bucket count in existing progress is not "
+ "consistent with that of the current document selection");
}
}
if (!progress.isFinished()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Importing unfinished progress token with " +
"bits: " + progressToken.getDistributionBitCount() +
", active: " + progressToken.getActiveBucketCount() +
", pending: " + progressToken.getPendingBucketCount() +
", cursor: " + progressToken.getBucketCursor() +
", finished: " + progressToken.getFinishedBucketCount() +
", total: " + progressToken.getTotalBucketCount());
}
if (!progress.isEmpty()) {
if (progressToken.getActiveBucketCount() > 0) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Progress token had active buckets upon range " +
"construction. Setting these as pending");
}
progressToken.setAllBucketsToState(ProgressToken.BucketState.BUCKET_PENDING);
}
correctInconsistentPending(progressToken.getDistributionBitCount());
correctTruncatedBucketCursor();
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Partial bucket space progress; continuing "+
"from position " + progressToken.getBucketCursor());
}
}
progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
progressToken.getPendingBucketCount());
} else {
assert(progressToken.getBucketCursor() == progressToken.getTotalBucketCount());
}
progressToken.setInconsistentState(false);
skipToSlice();
}
protected boolean isLosslessResetPossible() {
if (progressToken.getPendingBucketCount() != progressToken.getBucketCursor()) {
return false;
}
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progressToken.getBuckets().entrySet()) {
if (entry.getValue().getState() != ProgressToken.BucketState.BUCKET_PENDING) {
return false;
}
if (entry.getValue().getProgress().getId() != 0) {
return false;
}
}
return true;
}
/**
* Ensure that a given <code>ProgressToken</code> instance only has
* buckets pending that have a used-bits count of that of the
* <code>targetDistCits</code>. This is done by splitting or merging
* all inconsistent buckets until the desired state is reached.
*
* Time complexity is approx <i>O(4bn)</i> where <i>b</i> is the maximum
* delta of bits to change anywhere in the set of pending and <i>n</i>
* is the number of pending. This includes the time spent making shallow
* map copies.
*
* @param targetDistBits The desired distribution bit count of the buckets
*/
private void correctInconsistentPending(int targetDistBits) {
boolean maybeInconsistent = true;
long bucketsSplit = 0, bucketsMerged = 0;
long pendingBefore = progressToken.getPendingBucketCount();
ProgressToken p = progressToken;
if (isLosslessResetPossible()) {
if (log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "At start of bucket space and all " +
"buckets have no progress; doing a lossless reset " +
"instead of splitting/merging");
}
assert(p.getActiveBucketCount() == 0);
p.clearAllBuckets();
p.setBucketCursor(0);
skipToSlice();
return;
}
while (maybeInconsistent) {
BucketId lastMergedBucket = null;
maybeInconsistent = false;
TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets
= new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() < targetDistBits) {
if (pending.getUsedBits() + 1 < targetDistBits) {
maybeInconsistent = true;
}
p.splitPendingBucket(pending);
++bucketsSplit;
}
}
buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets());
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: buckets.entrySet()) {
assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING);
BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey()));
if (pending.getUsedBits() > targetDistBits) {
if (lastMergedBucket != null) {
BucketId rightCheck = new BucketId(lastMergedBucket.getUsedBits(),
lastMergedBucket.getId() | (1L << (lastMergedBucket.getUsedBits() - 1)));
if (pending.equals(rightCheck)) {
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "Skipped " + pending +
", as it was right sibling of " + lastMergedBucket);
}
continue;
}
}
if (pending.getUsedBits() - 1 > targetDistBits) {
maybeInconsistent = true;
}
p.mergePendingBucket(pending);
++bucketsMerged;
lastMergedBucket = pending;
}
}
}
if ((bucketsSplit > 0 || bucketsMerged > 0) && log.isLoggable(Level.FINE)) {
log.log(Level.FINE, "Existing progress' pending buckets had inconsistent " +
"distribution bits; performed " + bucketsSplit + " split ops and " +
bucketsMerged + " merge ops. Pending: " + pendingBefore + " -> " +
p.getPendingBucketCount());
}
}
private void correctTruncatedBucketCursor() {
for (ProgressToken.BucketKeyWrapper bucketKey
: progressToken.getBuckets().keySet()) {
BucketId bid = bucketKey.toBucketId();
long idx = bucketKey.getKey() >>> (64 - bid.getUsedBits());
if (bid.getUsedBits() == distributionBitCount
&& idx >= progressToken.getBucketCursor()) {
progressToken.setBucketCursor(idx + 1);
}
}
if (log.isLoggable(Level.FINEST)) {
log.log(Level.FINEST, "New range bucket cursor is " +
progressToken.getBucketCursor());
}
}
public boolean hasNext() {
long nextBucket = progressToken.getBucketCursor();
if (distributionBitCount != 1) {
nextBucket += Math.floorMod(sliceId - nextBucket, slices);
}
return nextBucket < (1L << distributionBitCount);
}
public boolean shouldYield() {
return flushActive;
}
public boolean visitsAllBuckets() {
return true;
}
public long getTotalBucketCount() {
return 1L << distributionBitCount;
}
public BucketProgress getNext() {
assert(hasNext()) : "getNext() called with hasNext() == false";
BucketProgress progress = new BucketProgress(progressToken.getCurrentBucketId(), new BucketId());
progressToken.setBucketCursor(progressToken.getBucketCursor() + 1);
skipToSlice();
return progress;
}
public int getDistributionBitCount() {
return distributionBitCount;
}
/**
 * Updates the distribution bit count for this range source. If buckets are
 * currently active, the progress token cannot be rewritten safely, so the
 * correction is deferred until update() has seen all active buckets finish.
 * Otherwise the finished-count and cursor are scaled up (split) or down
 * (merge) to the new bit count immediately.
 *
 * @param distributionBitCount the new distribution bit count
 * @param progress the progress token (used here only for logging the active count)
 */
public void setDistributionBitCount(int distributionBitCount,
                                    ProgressToken progress)
{
    this.distributionBitCount = distributionBitCount;
    if (progressToken.getActiveBucketCount() > 0) {
        // Defer correction; update() will re-invoke this method once all
        // active buckets have reported back.
        flushActive = true;
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, "Holding off new/pending buckets and consistency " +
                    "correction until all " + progress.getActiveBucketCount() +
                    " active buckets have been updated");
        }
        progressToken.setInconsistentState(true);
    } else {
        int delta = distributionBitCount - progressToken.getDistributionBitCount();
        correctInconsistentPending(distributionBitCount);
        if (delta > 0) {
            if (log.isLoggable(Level.FINE)) {
                log.log(Level.FINE, "Increasing distribution bits for full bucket " +
                        "space range source from " + progressToken.getDistributionBitCount() + " to " +
                        distributionBitCount);
            }
            // Each old superbucket splits into 2^delta new ones; scale counters accordingly.
            progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta);
            progressToken.setBucketCursor(progressToken.getBucketCursor() << delta);
        } else if (delta < 0) {
            if (log.isLoggable(Level.FINE)) {
                log.log(Level.FINE, "Decreasing distribution bits for full bucket " +
                        "space range source from " + progressToken.getDistributionBitCount() +
                        " to " + distributionBitCount + " bits");
            }
            // Superbuckets merge 2^-delta to one; the truncated cursor may now
            // point at already-tracked buckets, fixed by correctTruncatedBucketCursor().
            progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta);
            progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
        }
        progressToken.setTotalBucketCount(1L << distributionBitCount);
        progressToken.setDistributionBitCount(distributionBitCount);
        correctTruncatedBucketCursor();
        progressToken.setInconsistentState(false);
    }
}
/**
 * Applies progress for {@code superbucket} to the progress token and, once all
 * active buckets are flushed, finalizes any deferred distribution bit change.
 * The two original logging branches differed only in the word
 * finished/non-finished, so they are merged here; the flushActive assertion is
 * kept on the non-finished path only, exactly as before.
 *
 * @param superbucket the superbucket being reported on
 * @param progress    sub-bucket progress; FINISHED_BUCKET when the superbucket is done
 * @param token       unused here; progress is tracked via the member progressToken
 */
public void update(BucketId superbucket, BucketId progress,
                   ProgressToken token) {
    progressToken.updateProgress(superbucket, progress);
    if (superbucket.getUsedBits() != distributionBitCount) {
        boolean finished = progress.equals(ProgressToken.FINISHED_BUCKET);
        if (!finished) {
            // Only legal while we are flushing actives for a pending bit change.
            assert(flushActive);
        }
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, "Received " + (finished ? "finished" : "non-finished") +
                    " bucket " + superbucket + " with wrong distribution bit count (" +
                    superbucket.getUsedBits() + "). Waiting to correct " +
                    "until all active are done");
        }
    }
    if (progressToken.getActiveBucketCount() == 0) {
        if (flushActive) {
            if (log.isLoggable(Level.FINE)) {
                log.log(Level.FINE, "All active buckets flushed, " +
                        "correcting progress token and continuing normal operation");
            }
            // Re-run with the already-set member value to apply the deferred change.
            setDistributionBitCount(distributionBitCount, progressToken);
            assert(progressToken.getDistributionBitCount() == distributionBitCount);
        }
        flushActive = false;
        if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) {
            progressToken.setFinishedBucketCount(progressToken.getBucketCursor() -
                    progressToken.getPendingBucketCount());
        }
    }
}
} |
Use `lockAndGetRequired` | public void patch(String hostname, InputStream json) {
Inspector root = Exceptions.uncheck(() -> SlimeUtils.jsonToSlime(json.readAllBytes())).get();
Map<String, Inspector> fields = new HashMap<>();
root.traverse(fields::put);
Map<String, Inspector> regularFields = Maps.filterKeys(fields, k -> !IP_CONFIG_FIELDS.contains(k));
Map<String, Inspector> ipConfigFields = Maps.filterKeys(fields, IP_CONFIG_FIELDS::contains);
Map<String, Inspector> recursiveFields = Maps.filterKeys(fields, RECURSIVE_FIELDS::contains);
NodeMutex nodeMutex = nodeRepository.nodes().lockAndGet(hostname)
.orElseThrow(() -> new NotFoundException("No node found with hostname " + hostname));
patch(nodeMutex, regularFields, root, false);
patchIpConfig(hostname, ipConfigFields);
if (nodeMutex.node().type().isHost()) {
patchChildrenOf(hostname, recursiveFields, root);
}
} | .orElseThrow(() -> new NotFoundException("No node found with hostname " + hostname)); | public void patch(String hostname, InputStream json) {
Inspector root = Exceptions.uncheck(() -> SlimeUtils.jsonToSlime(json.readAllBytes())).get();
Map<String, Inspector> fields = new HashMap<>();
root.traverse(fields::put);
Map<String, Inspector> regularFields = Maps.filterKeys(fields, k -> !IP_CONFIG_FIELDS.contains(k));
Map<String, Inspector> ipConfigFields = Maps.filterKeys(fields, IP_CONFIG_FIELDS::contains);
Map<String, Inspector> recursiveFields = Maps.filterKeys(fields, RECURSIVE_FIELDS::contains);
NodeMutex nodeMutex = nodeRepository.nodes().lockAndGetRequired(hostname);
patch(nodeMutex, regularFields, root, false);
patchIpConfig(hostname, ipConfigFields);
if (nodeMutex.node().type().isHost()) {
patchChildrenOf(hostname, recursiveFields, root);
}
} | class NodePatcher {
private static final String WANT_TO_RETIRE = "wantToRetire";
private static final String WANT_TO_DEPROVISION = "wantToDeprovision";
private static final String WANT_TO_REBUILD = "wantToRebuild";
private static final Set<String> RECURSIVE_FIELDS = Set.of(WANT_TO_RETIRE);
private static final Set<String> IP_CONFIG_FIELDS = Set.of("ipAddresses",
"additionalIpAddresses",
"additionalHostnames");
private final NodeRepository nodeRepository;
private final NodeFlavors nodeFlavors;
private final Clock clock;
/** Creates a patcher operating on the given node repository, using the repository's clock. */
public NodePatcher(NodeFlavors nodeFlavors, NodeRepository nodeRepository) {
    this.nodeRepository = nodeRepository;
    this.nodeFlavors = nodeFlavors;
    this.clock = nodeRepository.clock();
}
/**
* Apply given JSON to the node identified by hostname. Any patched node(s) are written to the node repository.
*
* Note: This may patch more than one node if the field being patched must be applied recursively to host and node.
*/
/**
 * Applies the given fields to the locked node and writes the result back.
 * Takes ownership of nodeMutex and releases it when done.
 *
 * @param applyingAsChild true when this patch is being propagated from a parent host
 */
private void patch(NodeMutex nodeMutex, Map<String, Inspector> fields, Inspector root, boolean applyingAsChild) {
    try (var lock = nodeMutex) {
        Node node = nodeMutex.node();
        for (var kv : fields.entrySet()) {
            String name = kv.getKey();
            Inspector value = kv.getValue();
            try {
                node = applyField(node, name, value, root, applyingAsChild);
            } catch (IllegalArgumentException e) {
                // Wrap so the caller sees which field failed to apply.
                throw new IllegalArgumentException("Could not set field '" + name + "'", e);
            }
        }
        nodeRepository.nodes().write(node, lock);
    }
}
/**
 * Applies IP-config fields under the unallocated-nodes lock: IP changes are
 * verified against the full node list, so they need a repository-wide lock
 * rather than a per-node one.
 */
private void patchIpConfig(String hostname, Map<String, Inspector> ipConfigFields) {
    if (ipConfigFields.isEmpty()) return;
    try (var allocationLock = nodeRepository.nodes().lockUnallocated()) {
        LockedNodeList nodes = nodeRepository.nodes().list(allocationLock);
        Node node = nodes.node(hostname).orElseThrow(() -> new NotFoundException("No node found with hostname " + hostname));
        for (var kv : ipConfigFields.entrySet()) {
            String name = kv.getKey();
            Inspector value = kv.getValue();
            try {
                node = applyIpconfigField(node, name, value, nodes);
            } catch (IllegalArgumentException e) {
                // Wrap so the caller sees which field failed to apply.
                throw new IllegalArgumentException("Could not set field '" + name + "'", e);
            }
        }
        nodeRepository.nodes().write(node, allocationLock);
    }
}
/**
 * Propagates the recursively-applied fields to every child node of the given
 * host. Children that can no longer be locked (e.g. removed since listing)
 * are silently skipped, as before.
 */
private void patchChildrenOf(String hostname, Map<String, Inspector> recursiveFields, Inspector root) {
    if (recursiveFields.isEmpty()) return;
    for (var child : nodeRepository.nodes().list().childrenOf(hostname)) {
        nodeRepository.nodes().lockAndGet(child.hostname())
                      .ifPresent(mutex -> patch(mutex, recursiveFields, root, true));
    }
}
private Node applyField(Node node, String name, Inspector value, Inspector root, boolean applyingAsChild) {
switch (name) {
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(node, asLong(value));
case "currentDockerImage" :
if (node.type().isHost())
throw new IllegalArgumentException("Container image can only be set for child nodes");
return node.with(node.status().withContainerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.withCurrentOsVersion(Version.fromString(asString(value)), clock.instant());
case "currentFirmwareCheck":
return node.withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value)));
case "failCount" :
return node.with(node.status().withFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)), Agent.operator, clock.instant());
case "parentHostname" :
return node.withParentHostname(asString(value));
case WANT_TO_RETIRE:
case WANT_TO_DEPROVISION:
case WANT_TO_REBUILD:
boolean wantToRetire = asOptionalBoolean(root.field(WANT_TO_RETIRE)).orElse(node.status().wantToRetire());
boolean wantToDeprovision = asOptionalBoolean(root.field(WANT_TO_DEPROVISION)).orElse(node.status().wantToDeprovision());
boolean wantToRebuild = asOptionalBoolean(root.field(WANT_TO_REBUILD)).orElse(node.status().wantToRebuild());
return node.withWantToRetire(wantToRetire,
wantToDeprovision && !applyingAsChild,
wantToRebuild && !applyingAsChild,
Agent.operator,
clock.instant());
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
case "diskGb":
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())), Agent.operator, clock.instant());
case "memoryGb":
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())), Agent.operator, clock.instant());
case "vcpu":
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())), Agent.operator, clock.instant());
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? fast : slow)), Agent.operator, clock.instant());
case "remoteStorage":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? remote : local)), Agent.operator, clock.instant());
case "bandwidthGbps":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble())), Agent.operator, clock.instant());
case "modelName":
return value.type() == Type.NIX ? node.withoutModelName() : node.withModelName(asString(value));
case "requiredDiskSpeed":
return patchRequiredDiskSpeed(node, asString(value));
case "reservedTo":
return value.type() == Type.NIX ? node.withoutReservedTo() : node.withReservedTo(TenantName.from(value.asString()));
case "exclusiveTo":
case "exclusiveToApplicationId":
return node.withExclusiveToApplicationId(SlimeUtils.optionalString(value).map(ApplicationId::fromSerializedForm).orElse(null));
case "exclusiveToClusterType":
return node.withExclusiveToClusterType(SlimeUtils.optionalString(value).map(ClusterSpec.Type::valueOf).orElse(null));
case "switchHostname":
return value.type() == Type.NIX ? node.withoutSwitchHostname() : node.withSwitchHostname(value.asString());
case "trustStore":
return nodeWithTrustStore(node, value);
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
}
private Node applyIpconfigField(Node node, String name, Inspector value, LockedNodeList nodes) {
switch (name) {
case "ipAddresses":
return IP.Config.verify(node.with(node.ipConfig().withPrimary(asStringSet(value))), nodes);
case "additionalIpAddresses":
return IP.Config.verify(node.with(node.ipConfig().withPool(node.ipConfig().pool().withIpAddresses(asStringSet(value)))), nodes);
case "additionalHostnames":
return IP.Config.verify(node.with(node.ipConfig().withPool(node.ipConfig().pool().withAddresses(asAddressList(value)))), nodes);
}
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
/**
 * Returns the node with its reports patched from the given slime value. A NIX
 * value clears all reports; otherwise each entry either removes (NIX value) or
 * replaces the report with that id. If the patch flips the presence of any
 * HARD_FAIL report, want-to-retire/deprovision follow suit — except for nodes
 * already failed (when gaining a hard fail) or parked, which are left as-is.
 */
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
    Node patchedNode;
    if (reportsInspector.type() == Type.NIX) {
        // NIX for the whole reports object clears every report.
        patchedNode = node.with(new Reports());
    } else {
        var reportsBuilder = new Reports.Builder(node.reports());
        reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
            if (reportInspector.type() == Type.NIX) {
                reportsBuilder.clearReport(reportId);
            } else {
                reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
            }
        });
        patchedNode = node.with(reportsBuilder.build());
    }
    boolean hadHardFailReports = node.reports().getReports().stream()
            .anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
    boolean hasHardFailReports = patchedNode.reports().getReports().stream()
            .anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
    if (hadHardFailReports != hasHardFailReports) {
        // Failed/parked nodes keep their current retirement flags.
        if ((hasHardFailReports && node.state() == Node.State.failed) || node.state() == Node.State.parked)
            return patchedNode;
        patchedNode = patchedNode.withWantToRetire(hasHardFailReports, hasHardFailReports, Agent.system, clock.instant());
    }
    return patchedNode;
}
/** Returns the node with its trust store replaced by the items parsed from the given slime array. */
private Node nodeWithTrustStore(Node node, Inspector inspector) {
    return node.with(SlimeUtils.entriesStream(inspector)
                               .map(TrustStoreItem::fromSlime)
                               .collect(Collectors.toList()));
}
/**
 * Converts a slime ARRAY of STRING entries to a sorted set.
 *
 * @throws IllegalArgumentException if the field or any entry has the wrong type
 */
private Set<String> asStringSet(Inspector field) {
    if ( ! field.type().equals(Type.ARRAY))
        throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
    TreeSet<String> result = new TreeSet<>();
    int entryCount = field.entries();
    for (int index = 0; index < entryCount; index++) {
        Inspector element = field.entry(index);
        if ( ! element.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + element.type());
        result.add(element.asString());
    }
    return result;
}
/**
 * Converts a slime ARRAY of STRING entries to a list of Address instances.
 *
 * @throws IllegalArgumentException if the field or any entry has the wrong type
 */
private List<Address> asAddressList(Inspector field) {
    if ( ! field.type().equals(Type.ARRAY))
        throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
    List<Address> result = new ArrayList<>(field.entries());
    for (int index = 0; index < field.entries(); index++) {
        Inspector element = field.entry(index);
        if ( ! element.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + element.type());
        result.add(new Address(element.asString()));
    }
    return result;
}
/**
 * Sets the requested disk speed on an allocated node.
 *
 * @throws IllegalArgumentException if the node is not allocated
 */
private Node patchRequiredDiskSpeed(Node node, String value) {
    Allocation allocation = node.allocation()
                                .orElseThrow(() -> new IllegalArgumentException("Node is not allocated"));
    return node.with(allocation.withRequestedResources(
            allocation.requestedResources().with(NodeResources.DiskSpeed.valueOf(value))));
}
/**
 * Sets the current restart generation on an allocated node.
 *
 * @throws IllegalArgumentException if the node is not allocated
 */
private Node patchCurrentRestartGeneration(Node node, Long value) {
    Allocation allocation = node.allocation()
                                .orElseThrow(() -> new IllegalArgumentException("Node is not allocated"));
    return node.with(allocation.withRestart(allocation.restartGeneration().withCurrent(value)));
}
/** Returns the field's value as a Long, rejecting any non-LONG slime type. */
private Long asLong(Inspector field) {
    if ( ! field.type().equals(Type.LONG))
        throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
    return field.asLong();
}
/** Returns the field's value as a String, rejecting any non-STRING slime type. */
private String asString(Inspector field) {
    if ( ! field.type().equals(Type.STRING))
        throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
    return field.asString();
}
/** Returns the field's value as a boolean, rejecting any non-BOOL slime type. */
private boolean asBoolean(Inspector field) {
    if ( ! field.type().equals(Type.BOOL))
        throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
    return field.asBool();
}
/** Returns the field's boolean value, or empty if the field is not present (invalid). */
private Optional<Boolean> asOptionalBoolean(Inspector field) {
    return Optional.of(field).filter(Inspector::valid).map(this::asBoolean);
}
} | class NodePatcher {
private static final String WANT_TO_RETIRE = "wantToRetire";
private static final String WANT_TO_DEPROVISION = "wantToDeprovision";
private static final String WANT_TO_REBUILD = "wantToRebuild";
private static final Set<String> RECURSIVE_FIELDS = Set.of(WANT_TO_RETIRE);
private static final Set<String> IP_CONFIG_FIELDS = Set.of("ipAddresses",
"additionalIpAddresses",
"additionalHostnames");
private final NodeRepository nodeRepository;
private final NodeFlavors nodeFlavors;
private final Clock clock;
public NodePatcher(NodeFlavors nodeFlavors, NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
this.nodeFlavors = nodeFlavors;
this.clock = nodeRepository.clock();
}
/**
* Apply given JSON to the node identified by hostname. Any patched node(s) are written to the node repository.
*
* Note: This may patch more than one node if the field being patched must be applied recursively to host and node.
*/
private void patch(NodeMutex nodeMutex, Map<String, Inspector> fields, Inspector root, boolean applyingAsChild) {
try (var lock = nodeMutex) {
Node node = nodeMutex.node();
for (var kv : fields.entrySet()) {
String name = kv.getKey();
Inspector value = kv.getValue();
try {
node = applyField(node, name, value, root, applyingAsChild);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
}
nodeRepository.nodes().write(node, lock);
}
}
private void patchIpConfig(String hostname, Map<String, Inspector> ipConfigFields) {
if (ipConfigFields.isEmpty()) return;
try (var allocationLock = nodeRepository.nodes().lockUnallocated()) {
LockedNodeList nodes = nodeRepository.nodes().list(allocationLock);
Node node = nodes.node(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
for (var kv : ipConfigFields.entrySet()) {
String name = kv.getKey();
Inspector value = kv.getValue();
try {
node = applyIpconfigField(node, name, value, nodes);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
}
nodeRepository.nodes().write(node, allocationLock);
}
}
private void patchChildrenOf(String hostname, Map<String, Inspector> recursiveFields, Inspector root) {
if (recursiveFields.isEmpty()) return;
NodeList children = nodeRepository.nodes().list().childrenOf(hostname);
for (var child : children) {
Optional<NodeMutex> childNodeMutex = nodeRepository.nodes().lockAndGet(child.hostname());
if (childNodeMutex.isEmpty()) continue;
patch(childNodeMutex.get(), recursiveFields, root, true);
}
}
private Node applyField(Node node, String name, Inspector value, Inspector root, boolean applyingAsChild) {
switch (name) {
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(node, asLong(value));
case "currentDockerImage" :
if (node.type().isHost())
throw new IllegalArgumentException("Container image can only be set for child nodes");
return node.with(node.status().withContainerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.withCurrentOsVersion(Version.fromString(asString(value)), clock.instant());
case "currentFirmwareCheck":
return node.withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value)));
case "failCount" :
return node.with(node.status().withFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)), Agent.operator, clock.instant());
case "parentHostname" :
return node.withParentHostname(asString(value));
case WANT_TO_RETIRE:
case WANT_TO_DEPROVISION:
case WANT_TO_REBUILD:
boolean wantToRetire = asOptionalBoolean(root.field(WANT_TO_RETIRE)).orElse(node.status().wantToRetire());
boolean wantToDeprovision = asOptionalBoolean(root.field(WANT_TO_DEPROVISION)).orElse(node.status().wantToDeprovision());
boolean wantToRebuild = asOptionalBoolean(root.field(WANT_TO_REBUILD)).orElse(node.status().wantToRebuild());
return node.withWantToRetire(wantToRetire,
wantToDeprovision && !applyingAsChild,
wantToRebuild && !applyingAsChild,
Agent.operator,
clock.instant());
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
case "diskGb":
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())), Agent.operator, clock.instant());
case "memoryGb":
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())), Agent.operator, clock.instant());
case "vcpu":
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())), Agent.operator, clock.instant());
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? fast : slow)), Agent.operator, clock.instant());
case "remoteStorage":
return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? remote : local)), Agent.operator, clock.instant());
case "bandwidthGbps":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble())), Agent.operator, clock.instant());
case "modelName":
return value.type() == Type.NIX ? node.withoutModelName() : node.withModelName(asString(value));
case "requiredDiskSpeed":
return patchRequiredDiskSpeed(node, asString(value));
case "reservedTo":
return value.type() == Type.NIX ? node.withoutReservedTo() : node.withReservedTo(TenantName.from(value.asString()));
case "exclusiveTo":
case "exclusiveToApplicationId":
return node.withExclusiveToApplicationId(SlimeUtils.optionalString(value).map(ApplicationId::fromSerializedForm).orElse(null));
case "exclusiveToClusterType":
return node.withExclusiveToClusterType(SlimeUtils.optionalString(value).map(ClusterSpec.Type::valueOf).orElse(null));
case "switchHostname":
return value.type() == Type.NIX ? node.withoutSwitchHostname() : node.withSwitchHostname(value.asString());
case "trustStore":
return nodeWithTrustStore(node, value);
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
}
private Node applyIpconfigField(Node node, String name, Inspector value, LockedNodeList nodes) {
switch (name) {
case "ipAddresses":
return IP.Config.verify(node.with(node.ipConfig().withPrimary(asStringSet(value))), nodes);
case "additionalIpAddresses":
return IP.Config.verify(node.with(node.ipConfig().withPool(node.ipConfig().pool().withIpAddresses(asStringSet(value)))), nodes);
case "additionalHostnames":
return IP.Config.verify(node.with(node.ipConfig().withPool(node.ipConfig().pool().withAddresses(asAddressList(value)))), nodes);
}
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
Node patchedNode;
if (reportsInspector.type() == Type.NIX) {
patchedNode = node.with(new Reports());
} else {
var reportsBuilder = new Reports.Builder(node.reports());
reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
if (reportInspector.type() == Type.NIX) {
reportsBuilder.clearReport(reportId);
} else {
reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
}
});
patchedNode = node.with(reportsBuilder.build());
}
boolean hadHardFailReports = node.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
boolean hasHardFailReports = patchedNode.reports().getReports().stream()
.anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
if (hadHardFailReports != hasHardFailReports) {
if ((hasHardFailReports && node.state() == Node.State.failed) || node.state() == Node.State.parked)
return patchedNode;
patchedNode = patchedNode.withWantToRetire(hasHardFailReports, hasHardFailReports, Agent.system, clock.instant());
}
return patchedNode;
}
private Node nodeWithTrustStore(Node node, Inspector inspector) {
List<TrustStoreItem> trustStoreItems =
SlimeUtils.entriesStream(inspector)
.map(TrustStoreItem::fromSlime)
.collect(Collectors.toList());
return node.with(trustStoreItems);
}
private Set<String> asStringSet(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
TreeSet<String> strings = new TreeSet<>();
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
strings.add(entry.asString());
}
return strings;
}
private List<Address> asAddressList(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
List<Address> addresses = new ArrayList<>(field.entries());
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
Address address = new Address(entry.asString());
addresses.add(address);
}
return addresses;
}
private Node patchRequiredDiskSpeed(Node node, String value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRequestedResources(
allocation.get().requestedResources().with(NodeResources.DiskSpeed.valueOf(value))));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Node patchCurrentRestartGeneration(Node node, Long value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Long asLong(Inspector field) {
if ( ! field.type().equals(Type.LONG))
throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
return field.asLong();
}
private String asString(Inspector field) {
if ( ! field.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
return field.asString();
}
private boolean asBoolean(Inspector field) {
if ( ! field.type().equals(Type.BOOL))
throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
return field.asBool();
}
private Optional<Boolean> asOptionalBoolean(Inspector field) {
return Optional.of(field).filter(Inspector::valid).map(this::asBoolean);
}
} |
s/distributino/distribution/ | public void testIgnoredSlicing() throws ParseException {
int distBits = 1;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, distBits, progress, 3, 2);
assertTrue(iter.hasNext());
VisitorIterator.BucketProgress first = iter.getNext();
assertEquals(ProgressToken.toBucketId(0, 1), first.getSuperbucket());
VisitorIterator.BucketProgress second = iter.getNext();
assertEquals(ProgressToken.toBucketId(1, 1), second.getSuperbucket());
assertFalse(iter.hasNext());
iter.update(first.getSuperbucket(), first.getProgress());
iter.setDistributionBitCount(2);
assertEquals(2, iter.getDistributionBitCount());
assertEquals(1, progress.getDistributionBitCount());
iter.update(second.getSuperbucket(), second.getProgress());
assertEquals(2, iter.getDistributionBitCount());
assertEquals(2, progress.getDistributionBitCount());
assertTrue(iter.hasNext());
assertEquals(ProgressToken.toBucketId(2, 2), iter.getNext().getSuperbucket());
assertFalse(iter.hasNext());
} | public void testIgnoredSlicing() throws ParseException {
int distBits = 1;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, distBits, progress, 3, 2);
assertTrue(iter.hasNext());
VisitorIterator.BucketProgress first = iter.getNext();
assertEquals(ProgressToken.toBucketId(0, 1), first.getSuperbucket());
VisitorIterator.BucketProgress second = iter.getNext();
assertEquals(ProgressToken.toBucketId(1, 1), second.getSuperbucket());
assertFalse(iter.hasNext());
iter.update(first.getSuperbucket(), first.getProgress());
iter.setDistributionBitCount(2);
assertEquals(2, iter.getDistributionBitCount());
assertEquals(1, progress.getDistributionBitCount());
iter.update(second.getSuperbucket(), second.getProgress());
assertEquals(2, iter.getDistributionBitCount());
assertEquals(2, progress.getDistributionBitCount());
assertTrue(iter.hasNext());
assertEquals(ProgressToken.toBucketId(2, 2), iter.getNext().getSuperbucket());
assertFalse(iter.hasNext());
} | class VisitorIteratorTestCase {
@Test
public void testIterationSingleBucketUpdate() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user = 1234", idFactory, 1, progress);
assertFalse(progress.hasActive());
assertEquals(progress.getPendingBucketCount(), 1);
assertEquals(progress.getFinishedBucketCount(), 0);
assertEquals(progress.getTotalBucketCount(), 1);
assertFalse(iter.isDone());
assertTrue(iter.hasNext());
assertEquals(iter.getRemainingBucketCount(), 1);
VisitorIterator.BucketProgress b1 = iter.getNext();
assertEquals(b1.getSuperbucket(), new BucketId(32, 1234));
assertEquals(b1.getProgress(), new BucketId());
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(iter.getRemainingBucketCount(), 1);
assertEquals(progress.getActiveBucketCount(), 1);
assertFalse(progress.hasPending());
BucketId sub = new BucketId(b1.getSuperbucket().getUsedBits() + 1, b1.getSuperbucket().getId());
iter.update(b1.getSuperbucket(), sub);
assertFalse(progress.hasActive());
assertEquals(progress.getPendingBucketCount(), 1);
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(iter.getRemainingBucketCount(), 1);
VisitorIterator.BucketProgress b2 = iter.getNext();
assertEquals(b2.getSuperbucket(), new BucketId(32, 1234));
assertEquals(b2.getProgress(), new BucketId(33, 1234));
assertFalse(iter.hasNext());
assertEquals(progress.getActiveBucketCount(), 1);
assertFalse(progress.hasPending());
iter.update(b1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertFalse(progress.hasActive());
assertFalse(progress.hasPending());
assertFalse(iter.hasNext());
assertTrue(iter.isDone());
assertTrue(progress.isFinished());
assertEquals(progress.getFinishedBucketCount(), 1);
assertEquals(iter.getRemainingBucketCount(), 0);
}
/**
 * Iterator construction must reject a non-positive slice count and an
 * out-of-range slice id with descriptive messages. The original try/catch
 * blocks passed silently if no exception was thrown; fail() calls make the
 * expectation explicit. Also removes the duplicated @Test annotation that
 * preceded the following test method.
 */
@Test
public void testInvalidSlicing() throws ParseException {
    int distBits = 4;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 0, 0);
        fail("Expected IllegalArgumentException for zero slices");
    }
    catch (IllegalArgumentException e) {
        assertEquals("slices must be positive, but was 0", e.getMessage());
    }
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 1, 1);
        fail("Expected IllegalArgumentException for sliceId == slices");
    }
    catch (IllegalArgumentException e) {
        assertEquals("sliceId must be in [0, 1), but was 1", e.getMessage());
    }
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 1, -1);
        fail("Expected IllegalArgumentException for negative sliceId");
    }
    catch (IllegalArgumentException e) {
        assertEquals("sliceId must be in [0, 1), but was -1", e.getMessage());
    }
}
@Test
public void testValidSlicing() throws ParseException {
int distBits = 4;
long buckets = 1 << distBits;
BucketIdFactory idFactory = new BucketIdFactory();
for (int slices = 1; slices <= 2 * buckets; slices++) {
long bucketsTotal = 0;
for (int sliceId = 0; sliceId < slices; sliceId++) {
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, distBits, progress, slices, sliceId);
String context = "slices: " + slices + ", sliceId: " + sliceId;
assertEquals(context, progress.getDistributionBitCount(), distBits);
assertTrue(context, iter.getBucketSource() instanceof VisitorIterator.DistributionRangeBucketSource);
assertEquals(context, progress.getFinishedBucketCount(), Math.min(buckets, sliceId));
assertEquals(context, progress.getTotalBucketCount(), buckets);
long bucketCount = 0;
while (iter.hasNext() && progress.getFinishedBucketCount() < buckets / 2) {
VisitorIterator.BucketProgress ids = iter.getNext();
iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
++bucketCount;
++bucketsTotal;
}
if (slices + sliceId < buckets) {
assertEquals(context, ((buckets / 2) + slices - sliceId - 1) / slices, bucketCount);
assertFalse(context, progress.hasActive());
assertFalse(context, progress.hasPending());
assertFalse(context, iter.isDone());
assertTrue(context, iter.hasNext());
assertEquals(context, progress.getFinishedBucketCount(), bucketCount * slices + sliceId);
assertFalse(context, progress.isFinished());
}
while (iter.hasNext()) {
VisitorIterator.BucketProgress ids = iter.getNext();
iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
++bucketCount;
++bucketsTotal;
}
assertEquals(context, (buckets + slices - sliceId - 1) / slices, bucketCount);
assertFalse(context, progress.hasActive());
assertFalse(context, progress.hasPending());
assertTrue(context, iter.isDone());
assertFalse(context, iter.hasNext());
assertEquals(context, progress.getFinishedBucketCount(), buckets);
assertTrue(context, progress.isFinished());
}
assertEquals("slices: " + slices, buckets, bucketsTotal);
}
}
/**
 * Tests serialization/deserialization of range-source progress at three
 * stages: half finished, half finished plus in-flight (pending/active)
 * buckets, and fully finished.
 */
@Test
public void testProgressSerializationRange() throws ParseException {
    int distBits = 4;
    int buckets = 1 << distBits;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, distBits, progress);
    assertEquals(progress.getDistributionBitCount(), distBits);
    assertTrue(iter.getBucketSource() instanceof VisitorIterator.DistributionRangeBucketSource);
    assertEquals(progress.getFinishedBucketCount(), 0);
    assertEquals(progress.getTotalBucketCount(), buckets);
    // Finish exactly half of the buckets.
    long bucketCount = 0;
    long bucketStop = buckets / 2;
    while (iter.hasNext() && bucketCount != bucketStop) {
        VisitorIterator.BucketProgress ids = iter.getNext();
        iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
        ++bucketCount;
    }
    assertEquals(bucketCount, bucketStop);
    assertFalse(progress.hasActive());
    assertFalse(progress.hasPending());
    assertFalse(iter.isDone());
    assertTrue(iter.hasNext());
    assertEquals(progress.getFinishedBucketCount(), bucketCount);
    assertFalse(progress.isFinished());
    // Expected serialized form: header line, distribution bits, cursor,
    // finished count, total count — no in-flight bucket entries yet.
    StringBuilder desired = new StringBuilder();
    desired.append("VDS bucket progress file (50.0% completed)\n");
    desired.append(distBits);
    desired.append('\n');
    desired.append(bucketCount);
    desired.append('\n');
    desired.append(bucketCount);
    desired.append('\n');
    desired.append(buckets);
    desired.append('\n');
    assertEquals(desired.toString(), progress.toString());
    BucketIdFactory idFactory2 = new BucketIdFactory();
    {
        // Deserializing at this point must resume iteration at the first
        // unfinished bucket; the requested distribution bit count of 1 is
        // overridden by the token's stored bit count.
        ProgressToken progDs = new ProgressToken(progress.toString());
        assertEquals(progDs.getDistributionBitCount(), distBits);
        assertEquals(progDs.getTotalBucketCount(), buckets);
        assertEquals(progDs.getFinishedBucketCount(), bucketCount);
        VisitorIterator iterDs = VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory2, 1, progDs);
        assertFalse(progDs.hasPending());
        assertFalse(progDs.hasActive());
        assertTrue(iterDs.hasNext());
        assertFalse(iterDs.isDone());
        assertEquals(distBits, iterDs.getDistributionBitCount());
        assertEquals(distBits, progDs.getDistributionBitCount());
        VisitorIterator.BucketProgress idDs = iterDs.getNext();
        long resumeKey = ProgressToken.makeNthBucketKey(bucketCount, distBits);
        assertEquals(idDs.getSuperbucket(), new BucketId(ProgressToken.keyToBucketId(resumeKey)));
        assertEquals(idDs.getProgress(), new BucketId());
    }
    // Fetch 2 * (buckets/8) more buckets; update the first half with
    // sub-bucket progress (moving them to pending) and leave the rest active.
    // Note pendingTotal == activeTotal, so the counters below match either way.
    int pendingTotal = buckets / 8;
    int activeTotal = buckets / 8;
    Vector<VisitorIterator.BucketProgress> trackedBuckets = new Vector<VisitorIterator.BucketProgress>();
    for (int i = 0; i < pendingTotal + activeTotal; ++i) {
        trackedBuckets.add(iter.getNext());
    }
    for (int i = 0; i < pendingTotal + activeTotal; ++i) {
        VisitorIterator.BucketProgress idTemp = trackedBuckets.get(i);
        if (i < activeTotal) {
            iter.update(idTemp.getSuperbucket(),
                    new BucketId(distBits + 2, idTemp.getSuperbucket().getId() | (2 * buckets)));
        }
    }
    assertEquals(progress.getActiveBucketCount(), activeTotal);
    assertEquals(progress.getPendingBucketCount(), pendingTotal);
    // Serialized form now also contains one "bucketHex:progressHex" line per
    // in-flight (pending or active) bucket.
    desired = new StringBuilder();
    desired.append("VDS bucket progress file (").append(progress.percentFinished()).append("% completed)\n");
    desired.append(distBits);
    desired.append('\n');
    desired.append(bucketCount + pendingTotal + activeTotal);
    desired.append('\n');
    desired.append(bucketCount);
    desired.append('\n');
    desired.append(buckets);
    desired.append('\n');
    assertEquals(progress.getBuckets().entrySet().size(), pendingTotal + activeTotal);
    for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
            : progress.getBuckets().entrySet()) {
        desired.append(Long.toHexString(ProgressToken.keyToBucketId(entry.getKey().getKey())));
        desired.append(':');
        desired.append(Long.toHexString(entry.getValue().getProgress().getRawId()));
        desired.append('\n');
    }
    assertEquals(progress.toString(), desired.toString());
    {
        // Deserializing with in-flight buckets: all of them (active included)
        // must come back as pending, since activity is not persisted.
        ProgressToken progDs = new ProgressToken(progress.toString());
        assertEquals(progDs.getDistributionBitCount(), distBits);
        assertEquals(progDs.getTotalBucketCount(), buckets);
        assertEquals(progDs.getFinishedBucketCount(), bucketCount);
        VisitorIterator iterDs = VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory2, 1, progDs);
        assertEquals(progDs.getPendingBucketCount(), pendingTotal + activeTotal);
        assertEquals(distBits, progDs.getDistributionBitCount());
        assertEquals(distBits, iterDs.getDistributionBitCount());
        assertFalse(progDs.hasActive());
        assertTrue(iterDs.hasNext());
        assertFalse(iterDs.isDone());
        assertEquals(progDs.getBucketCursor(), bucketCount + pendingTotal + activeTotal);
    }
    // Finish the buckets that were left active above.
    for (int i = activeTotal; i < activeTotal + pendingTotal; ++i) {
        iter.update(trackedBuckets.get(i).getSuperbucket(), ProgressToken.FINISHED_BUCKET);
        ++bucketCount;
    }
    assertEquals(progress.getActiveBucketCount(), 0);
    // Drain the iterator completely; hasNext() must agree with isDone() all
    // the way to the end.
    boolean consistentNext = true;
    while (!iter.isDone()) {
        if (!iter.hasNext()) {
            consistentNext = false;
            break;
        }
        VisitorIterator.BucketProgress bp = iter.getNext();
        iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
        ++bucketCount;
    }
    assertTrue(consistentNext);
    assertFalse(iter.hasNext());
    assertTrue(progress.isFinished());
    assertEquals(bucketCount, buckets);
    // A fully finished token serializes with cursor == finished == total.
    StringBuilder finished = new StringBuilder();
    finished.append("VDS bucket progress file (100.0% completed)\n");
    finished.append(distBits);
    finished.append('\n');
    finished.append(buckets);
    finished.append('\n');
    finished.append(buckets);
    finished.append('\n');
    finished.append(buckets);
    finished.append('\n');
    assertEquals(progress.toString(), finished.toString());
}
/**
 * Tests serialization/deserialization of progress for an explicit (id.user)
 * bucket source, where the full set of buckets is known up front and all
 * start out as pending.
 */
@Test
public void testProgressSerializationExplicit() throws ParseException {
    int distBits = 16;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, progress);
    assertEquals(progress.getDistributionBitCount(), distBits);
    assertTrue(iter.getBucketSource() instanceof VisitorIterator.ExplicitBucketSource);
    assertEquals(progress.getFinishedBucketCount(), 0);
    assertEquals(progress.getTotalBucketCount(), 3);
    assertEquals(progress.getPendingBucketCount(), 3);
    VisitorIterator.BucketProgress bp1 = iter.getNext();
    VisitorIterator.BucketProgress bp2 = iter.getNext();
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 2);
    assertEquals(bp1.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(bp1.getProgress(), new BucketId());
    // Reporting sub-bucket progress moves bp1 from active back to pending.
    iter.update(bp1.getSuperbucket(), new BucketId(36, 1234));
    assertEquals(progress.getPendingBucketCount(), 2);
    assertEquals(bp2.getSuperbucket(), new BucketId(32, 8009));
    assertEquals(bp2.getProgress(), new BucketId());
    {
        // Expected serialized form: header, distribution bits, cursor and
        // finished count (both 0 here), total count, then one
        // "bucketHex:progressHex" line per unfinished bucket.
        StringBuilder desired = new StringBuilder();
        desired.append("VDS bucket progress file (").append(progress.percentFinished()).append("% completed)\n");
        desired.append(distBits);
        desired.append('\n');
        desired.append(0);
        desired.append('\n');
        desired.append(0);
        desired.append('\n');
        desired.append(3);
        desired.append('\n');
        desired.append(Long.toHexString(new BucketId(32, 1234).getRawId()));
        desired.append(':');
        desired.append(Long.toHexString(new BucketId(36, 1234).getRawId()));
        desired.append('\n');
        desired.append(Long.toHexString(new BucketId(32, 8009).getRawId()));
        desired.append(":0\n");
        desired.append(Long.toHexString(new BucketId(32, 6789).getRawId()));
        desired.append(":0\n");
        assertEquals(desired.toString(), progress.toString());
        // Deserializing must restore all three buckets as pending, with
        // bp1's partial progress preserved.
        ProgressToken prog2 = new ProgressToken(progress.toString());
        assertEquals(prog2.getDistributionBitCount(), distBits);
        assertEquals(prog2.getTotalBucketCount(), 3);
        assertEquals(prog2.getFinishedBucketCount(), 0);
        VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
                "id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, prog2);
        assertEquals(prog2.getPendingBucketCount(), 3);
        assertFalse(prog2.hasActive());
        assertTrue(iter2.hasNext());
        assertFalse(iter2.isDone());
        assertTrue(iter2.getBucketSource() instanceof VisitorIterator.ExplicitBucketSource);
        // The source itself is exhausted; all buckets come from the token.
        assertFalse(iter2.getBucketSource().hasNext());
        VisitorIterator.BucketProgress bp = iter2.getNext();
        assertEquals(bp.getSuperbucket(), new BucketId(32, 1234));
        assertEquals(bp.getProgress(), new BucketId(36, 1234));
        assertEquals(prog2.getPendingBucketCount(), 2);
        assertTrue(iter2.hasNext());
        assertFalse(iter2.isDone());
        bp = iter2.getNext();
        assertEquals(bp.getSuperbucket(), new BucketId(32, 8009));
        assertEquals(bp.getProgress(), new BucketId());
        assertEquals(prog2.getPendingBucketCount(), 1);
        assertTrue(iter2.hasNext());
        assertFalse(iter2.isDone());
        bp = iter2.getNext();
        assertEquals(prog2.getPendingBucketCount(), 0);
        assertEquals(bp.getSuperbucket(), new BucketId(32, 6789));
        assertEquals(bp.getProgress(), new BucketId());
        assertFalse(iter2.hasNext());
        assertFalse(iter2.isDone());
        assertEquals(prog2.getActiveBucketCount(), 3);
    }
    // Back on the original iterator: the pending bucket with partial
    // progress is handed out again.
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    bp1 = iter.getNext();
    assertEquals(bp1.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(bp1.getProgress(), new BucketId(36, 1234));
    iter.update(bp1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    bp1 = iter.getNext();
    assertEquals(bp1.getSuperbucket(), new BucketId(32, 6789));
    assertEquals(bp1.getProgress(), new BucketId());
    assertEquals(
            progress.toString(),
            "VDS bucket progress file (" + progress.percentFinished() + "% completed)\n" +
            "16\n" +
            "0\n" +
            "1\n" +
            "3\n" +
            "8000000000001f49:0\n" +
            "8000000000001a85:0\n");
    iter.update(bp1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    // bp2 is still active, so the iterator is not done yet.
    assertFalse(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
    iter.update(bp2.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertFalse(iter.hasNext());
    assertTrue(iter.isDone());
    assertTrue(progress.isFinished());
    assertEquals(progress.getActiveBucketCount(), 0);
    {
        // Finished token: cursor 0 (explicit source), finished == total == 3.
        StringBuilder finished = new StringBuilder();
        finished.append("VDS bucket progress file (100.0% completed)\n");
        finished.append(distBits);
        finished.append('\n');
        finished.append(0);
        finished.append('\n');
        finished.append(3);
        finished.append('\n');
        finished.append(3);
        finished.append('\n');
        assertEquals(finished.toString(), progress.toString());
    }
}
/**
 * Verifies that a bucket can be update()'d several times in a row without an
 * intervening getNext(); the most recent progress must be the one handed back
 * when the bucket is re-fetched.
 */
@Test
public void testActiveUpdate() throws ParseException {
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group = \"yahoo.com\"", idFactory, 16, progress);
    VisitorIterator.BucketProgress fetched = iter.getNext();
    assertEquals(0, progress.getPendingBucketCount());
    assertEquals(1, progress.getActiveBucketCount());
    BucketId sb = fetched.getSuperbucket();
    int bits = sb.getUsedBits();
    BucketId firstProgress = new BucketId(bits + 2, sb.getId() | (2L << bits));
    BucketId secondProgress = new BucketId(bits + 2, sb.getId() | (1L << bits));
    // First update moves the bucket from active back to pending.
    iter.update(sb, firstProgress);
    assertEquals(1, progress.getPendingBucketCount());
    assertEquals(0, progress.getActiveBucketCount());
    // A second update on the already-pending bucket overwrites its progress.
    iter.update(sb, secondProgress);
    assertEquals(1, progress.getPendingBucketCount());
    assertEquals(0, progress.getActiveBucketCount());
    // Re-fetching returns the same superbucket with the latest progress.
    fetched = iter.getNext();
    assertEquals(sb, fetched.getSuperbucket());
    assertEquals(secondProgress, fetched.getProgress());
    assertEquals(0, progress.getPendingBucketCount());
    assertEquals(1, progress.getActiveBucketCount());
}
/**
 * Test that ensures doing update(superbucket, NULL_BUCKET) — and likewise
 * update(superbucket, superbucket) — simply puts the bucket back in pending,
 * with the given value preserved as its progress for the next getNext().
 */
@Test
public void testNullAndSuperUpdate() throws ParseException {
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group = \"yahoo.com\"", idFactory, 16, progress);
    assertEquals(progress.getPendingBucketCount(), 1);
    VisitorIterator.BucketProgress bp = iter.getNext();
    assertEquals(bp.getProgress(), new BucketId());
    BucketId superbucket = bp.getSuperbucket();
    BucketId sub = bp.getProgress();  // NOTE(review): unused; kept for clarity of intent
    assertFalse(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
    // Null-bucket update: bucket returns to pending with NULL_BUCKET progress.
    iter.update(superbucket, ProgressToken.NULL_BUCKET);
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 0);
    VisitorIterator.BucketProgress bp2 = iter.getNext();
    assertEquals(bp2.getSuperbucket(), superbucket);
    assertEquals(bp2.getProgress(), ProgressToken.NULL_BUCKET);
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
    // Updating with the superbucket itself behaves the same way: back to
    // pending, progress equal to the superbucket.
    iter.update(superbucket, superbucket);
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 0);
    bp2 = iter.getNext();
    assertEquals(bp2.getSuperbucket(), superbucket);
    assertEquals(bp2.getProgress(), superbucket);
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
}
/**
 * A serialized token whose cursor and finished count both equal the total
 * bucket count must deserialize as finished, and must survive a
 * serialize/deserialize round trip unchanged.
 */
@Test
public void testDeserializedFinishedProgress() {
    final int distBits = 17;
    final long allBuckets = 1L << 17;
    // Header without a percentage suffix is also accepted by the parser.
    String serialized = "VDS bucket progress file\n"
            + distBits + '\n'
            + allBuckets + '\n'
            + allBuckets + '\n'
            + allBuckets + '\n';
    ProgressToken token = new ProgressToken(serialized);
    assertEquals(distBits, token.getDistributionBitCount());
    assertEquals(allBuckets, token.getTotalBucketCount());
    assertEquals(allBuckets, token.getFinishedBucketCount());
    assertEquals(allBuckets, token.getBucketCursor());
    assertTrue(token.isFinished());
    // Round trip: re-serializing and parsing again preserves all state.
    ProgressToken reparsed = new ProgressToken(token.serialize());
    assertEquals(distBits, reparsed.getDistributionBitCount());
    assertEquals(allBuckets, reparsed.getTotalBucketCount());
    assertEquals(allBuckets, reparsed.getFinishedBucketCount());
    assertEquals(allBuckets, reparsed.getBucketCursor());
    assertTrue(reparsed.isFinished());
}
/**
 * Verifies progressFraction(): how far through a superbucket a given
 * sub-bucket progress value is. Variable names encode the expectation as
 * b_&lt;percent&gt;_&lt;extra used bits&gt;.
 */
@Test
public void testBucketProgressFraction() {
    double epsilon = 0.00001;
    BucketId b_0 = new BucketId();
    BucketId b_100_0 = new BucketId(16, 1234);
    BucketId b_50_1 = new BucketId(17, 1234);
    BucketId b_100_1 = new BucketId(17, 1234 | (1 << 16));
    BucketId b_25_2 = new BucketId(18, 1234);
    BucketId b_50_2 = new BucketId(18, 1234 | (2 << 16));
    BucketId b_75_2 = new BucketId(18, 1234 | (1 << 16));
    BucketId b_100_2 = new BucketId(18, 1234 | (3 << 16));
    ProgressToken p = new ProgressToken(16);
    BucketId sb = new BucketId(16, 1234);
    // A null progress bucket means nothing has been visited yet.
    assertEquals(p.progressFraction(new BucketId(32, 1234), b_0), 0.0, epsilon);
    assertEquals(p.progressFraction(sb, b_100_0), 1.0, epsilon);
    assertEquals(p.progressFraction(sb, b_50_1), 0.5, epsilon);
    assertEquals(p.progressFraction(sb, b_100_1), 1.0, epsilon);
    assertEquals(p.progressFraction(sb, b_25_2), 0.25, epsilon);
    assertEquals(p.progressFraction(sb, b_50_2), 0.5, epsilon);
    assertEquals(p.progressFraction(sb, b_75_2), 0.75, epsilon);
    assertEquals(p.progressFraction(sb, b_100_2), 1.0, epsilon);
    // Raw-id variant with the sign bit set must also report completion.
    assertEquals(p.progressFraction(new BucketId(0x8000000000000000L),
            new BucketId(0xb0000fff00000000L)), 1.0, epsilon);
}
/**
 * Verifies percentFinished() estimation with 16 superbuckets: each finished
 * bucket contributes 6.25%, and partially visited buckets contribute
 * fractionally according to their sub-bucket progress.
 */
@Test
public void testProgressEstimation() throws ParseException {
    int distBits = 4;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, distBits, progress);
    assertEquals(progress.getDistributionBitCount(), 4);
    double epsilon = 0.00001;
    assertEquals(progress.percentFinished(), 0, epsilon);
    // One of 16 buckets finished -> 6.25%.
    VisitorIterator.BucketProgress bp = iter.getNext();
    iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertEquals(progress.percentFinished(), 6.25, epsilon);
    assertEquals(progress.getFinishedBucketCount(), 1);
    bp = iter.getNext();
    VisitorIterator.BucketProgress bp3 = iter.getNext();
    VisitorIterator.BucketProgress bp4 = iter.getNext();
    iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertEquals(progress.percentFinished(), 12.5, epsilon);
    assertEquals(progress.getFinishedBucketCount(), 2);
    // Partial progress (2 extra bits, second quarter) adds a fraction of a
    // bucket's share without bumping the finished count.
    iter.update(bp3.getSuperbucket(), new BucketId(distBits + 2, bp3.getSuperbucket().getId() | (1 << distBits)));
    assertEquals(progress.percentFinished(), 17.1875, epsilon);
    assertEquals(progress.getFinishedBucketCount(), 2);
    iter.update(bp4.getSuperbucket(), new BucketId(distBits + 2, bp4.getSuperbucket().getId()));
    assertEquals(progress.percentFinished(), 18.75, epsilon);
    assertEquals(progress.getFinishedBucketCount(), 2);
    // Finishing the two partial buckets brings the estimate to 4/16 = 25%.
    iter.update(bp4.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    iter.update(bp3.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertEquals(progress.percentFinished(), 25, epsilon);
    assertEquals(progress.getFinishedBucketCount(), 4);
    // Drain the rest; the estimate must end at exactly 100%.
    while (iter.hasNext()) {
        bp = iter.getNext();
        iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    }
    assertEquals(progress.getFinishedBucketCount(), 16);
    assertEquals(progress.percentFinished(), 100, epsilon);
}
/**
 * BucketKeyWrapper must order keys as unsigned 64-bit values: a key with the
 * sign bit set compares greater than any key without it.
 */
@Test
public void testBucketKeyWrapperOrdering() {
    ProgressToken.BucketKeyWrapper lowest = new ProgressToken.BucketKeyWrapper(0x0000000000000001L);
    ProgressToken.BucketKeyWrapper maxSigned = new ProgressToken.BucketKeyWrapper(0x7FFFFFFFFFFFFFFFL);
    ProgressToken.BucketKeyWrapper minHighBit = new ProgressToken.BucketKeyWrapper(0x8000000000000000L);
    ProgressToken.BucketKeyWrapper highest = new ProgressToken.BucketKeyWrapper(0xFFFFFFFFFFFFFFFFL);
    // Strictly increasing in unsigned order...
    assertTrue(lowest.compareTo(maxSigned) < 0);
    assertTrue(maxSigned.compareTo(minHighBit) < 0);
    assertTrue(minHighBit.compareTo(highest) < 0);
    // ...and the reverse comparisons agree.
    assertTrue(maxSigned.compareTo(lowest) > 0);
    assertTrue(minHighBit.compareTo(maxSigned) > 0);
    assertTrue(highest.compareTo(minHighBit) > 0);
    // Equal keys compare as equal, on both sides of the sign bit.
    ProgressToken.BucketKeyWrapper maxSignedCopy = new ProgressToken.BucketKeyWrapper(0x7FFFFFFFFFFFFFFFL);
    ProgressToken.BucketKeyWrapper minHighBitCopy = new ProgressToken.BucketKeyWrapper(0x8000000000000000L);
    assertTrue(maxSignedCopy.compareTo(maxSigned) == 0);
    assertTrue(minHighBitCopy.compareTo(minHighBit) == 0);
}
/**
 * For the given distribution bit count, checks that makeNthBucketKey(i, db)
 * produces exactly the i'th key of the sorted sequence of all bucket keys
 * for that bit count.
 */
private void doTestBucketKeyGeneration(int db) {
    int bucketCount = 1 << db;
    ProgressToken.BucketKeyWrapper[] sortedKeys = new ProgressToken.BucketKeyWrapper[bucketCount];
    for (int i = 0; i < bucketCount; ++i) {
        sortedKeys[i] = new ProgressToken.BucketKeyWrapper(
                ProgressToken.bucketToKey(new BucketId(db, i).getId()));
    }
    Arrays.sort(sortedKeys);
    // Every generated key must match its position in the sorted sequence.
    boolean consistentKeys = true;
    for (int i = 0; i < bucketCount; ++i) {
        if (ProgressToken.makeNthBucketKey(i, db) != sortedKeys[i].getKey()) {
            consistentKeys = false;
            break;
        }
    }
    assertTrue(consistentKeys);
}
/** Exercises key generation for every distribution bit count from 1 through 13. */
@Test
public void testBucketKeyGeneration() {
    for (int distBits = 1; distBits < 14; ++distBits) {
        doTestBucketKeyGeneration(distBits);
    }
}
/**
 * Verifies splitPendingBucket(): splitting a pending superbucket yields two
 * children (one more used bit), both of which inherit the parent's recorded
 * progress and are handed out before moving on to the next superbucket.
 */
@Test
public void testSingleBucketSplits() throws ParseException {
    int db = 2;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    // Split a pending bucket with null (no) progress.
    VisitorIterator.BucketProgress bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), new BucketId(db, 0));
    iter.update(bp.getSuperbucket(), new BucketId());
    assertEquals(p.getPendingBucketCount(), 1);
    p.splitPendingBucket(new BucketId(db, 0));
    assertEquals(p.getPendingBucketCount(), 2);
    bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 0));
    assertEquals(bp.getProgress(), new BucketId(0));
    bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 4));
    assertEquals(bp.getProgress(), new BucketId(0));
    // Split a pending bucket that has partial sub-bucket progress; both
    // children inherit that progress.
    bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), new BucketId(db, 2));
    iter.update(bp.getSuperbucket(), new BucketId(db + 3, 0x12));
    assertEquals(p.getPendingBucketCount(), 1);
    p.splitPendingBucket(new BucketId(db, 2));
    assertEquals(p.getPendingBucketCount(), 2);
    bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 2));
    assertEquals(bp.getProgress(), new BucketId(db + 3, 0x12));
    bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 6));
    assertEquals(bp.getProgress(), new BucketId(db + 3, 0x12));
    // And once more for the remaining superbucket.
    bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), new BucketId(db, 1));
    iter.update(bp.getSuperbucket(), new BucketId(db + 3, 0x15));
    assertEquals(p.getPendingBucketCount(), 1);
    p.splitPendingBucket(new BucketId(db, 1));
    assertEquals(p.getPendingBucketCount(), 2);
    bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 1));
    assertEquals(bp.getProgress(), new BucketId(db + 3, 0x15));
    bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 5));
    assertEquals(bp.getProgress(), new BucketId(db + 3, 0x15));
}
/**
 * Test increasing the distribution bits for a full bucket space range
 * source with no finished, active or pending buckets: iteration simply
 * restarts over the doubled bucket space.
 * @throws ParseException upon docsel parse failure (shouldn't happen)
 */
@Test
public void testRangeDistributionBitIncrease1NoPending() throws ParseException {
    int distBits = 2;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, distBits, token);
    assertEquals(4, token.getTotalBucketCount());
    iter.setDistributionBitCount(distBits + 1);
    // All bookkeeping must reflect the new bit count immediately.
    assertEquals(8, token.getTotalBucketCount());
    assertEquals(distBits + 1, token.getDistributionBitCount());
    assertEquals(distBits + 1, iter.getDistributionBitCount());
    assertEquals(distBits + 1, iter.getBucketSource().getDistributionBitCount());
    // Buckets are handed out in reversed-bit (key) order.
    for (int expected : new int[] { 0, 4, 2, 6, 1, 5, 3, 7 }) {
        assertEquals(new BucketId(distBits + 1, expected), iter.getNext().getSuperbucket());
    }
}
/**
 * Increasing the distribution bits by one with finished and pending buckets
 * present: finished/pending counts double, the pending buckets are split and
 * handed out first, and iteration then continues over the new bucket space.
 */
@Test
public void testRangeDistributionBitIncrease1AllBucketStates() throws ParseException {
    int db = 3;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    // One finished bucket, three pending (fetched and updated with no progress).
    VisitorIterator.BucketProgress bp = iter.getNext();
    iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[3];
    bpp[0] = iter.getNext();
    bpp[1] = iter.getNext();
    bpp[2] = iter.getNext();
    iter.update(bpp[0].getSuperbucket(), new BucketId());
    iter.update(bpp[1].getSuperbucket(), new BucketId());
    iter.update(bpp[2].getSuperbucket(), new BucketId());
    assertEquals(p.getFinishedBucketCount(), 1);
    assertEquals(p.getPendingBucketCount(), 3);
    assertEquals(p.getActiveBucketCount(), 0);
    iter.setDistributionBitCount(db + 1);
    // Each finished/pending bucket is doubled into its two children.
    assertEquals(p.getTotalBucketCount(), 16);
    assertEquals(p.getFinishedBucketCount(), 2);
    assertEquals(p.getPendingBucketCount(), 6);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getDistributionBitCount(), db + 1);
    assertEquals(iter.getDistributionBitCount(), db + 1);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), db + 1);
    // The six split pending buckets come out first...
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x04));
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0C));
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x02));
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0A));
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x06));
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0E));
    assertEquals(p.getPendingBucketCount(), 0);
    // ...then the source resumes with fresh buckets at the new bit count.
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x01));
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x09));
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x05));
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0D));
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x03));
}
/**
 * Increasing the distribution bits by several (16 -> 20) while a bucket is
 * still active: the iterator must yield (hand out nothing) until the active
 * bucket is updated, after which all state is converted to the new bit count
 * (each old bucket expanding to 16 children).
 */
@Test
public void testRangeDistributionIncreaseMultipleBits() throws ParseException {
    int db = 16;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    // Three finished, two pending, one left active.
    for (int i = 0; i < 3; ++i) {
        iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    }
    VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[2];
    bpp[0] = iter.getNext();
    bpp[1] = iter.getNext();
    VisitorIterator.BucketProgress bpa = iter.getNext();
    iter.update(bpp[0].getSuperbucket(), new BucketId());
    iter.update(bpp[1].getSuperbucket(), new BucketId());
    iter.setDistributionBitCount(20);
    // The token keeps the old bit count until the active bucket is resolved;
    // only the iterator/source switch immediately.
    assertEquals(p.getDistributionBitCount(), 16);
    assertEquals(iter.getDistributionBitCount(), 20);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), 20);
    assertFalse(iter.hasNext());
    assertFalse(iter.isDone());
    assertTrue(iter.getBucketSource().shouldYield());
    assertEquals(p.getPendingBucketCount(), 2);
    assertEquals(p.getActiveBucketCount(), 1);
    // Resolving the last active bucket triggers the conversion: 2 pending
    // buckets x 16 children each, cursor and finished count scaled by 16.
    iter.update(bpa.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertEquals(p.getDistributionBitCount(), 20);
    assertEquals(p.getPendingBucketCount(), 32);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 96);
    assertEquals(p.getFinishedBucketCount(), 16 * 4);
    // The 32 pending children are handed out in key order before the source
    // continues at the cursor.
    for (int i = 0; i < 32; ++i) {
        long testKey = ProgressToken.makeNthBucketKey(i + 48, 20);
        VisitorIterator.BucketProgress bp = iter.getNext();
        assertEquals(bp.getSuperbucket(), new BucketId(ProgressToken.keyToBucketId(testKey)));
        iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    }
    assertEquals(p.getPendingBucketCount(), 0);
    assertEquals(p.getFinishedBucketCount(), 16 * 6);
    assertEquals(iter.getNext().getSuperbucket(), new BucketId(20, 0x6000));
}
/**
 * Splitting a pending bucket and then merging one of its halves back must
 * restore the original unsplit bucket as the next one handed out.
 */
@Test
public void testSingleBucketMerge() throws ParseException {
    int distBits = 2;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, distBits, token);
    // Fetch the first superbucket and put it back as pending with no progress.
    BucketId first = iter.getNext().getSuperbucket();
    iter.update(first, new BucketId());
    token.splitPendingBucket(new BucketId(distBits, 0));
    assertEquals(2, token.getPendingBucketCount());
    token.mergePendingBucket(new BucketId(distBits + 1, 0));
    assertEquals(1, token.getPendingBucketCount());
    // The merged bucket comes back at its original (pre-split) bit count.
    assertEquals(new BucketId(distBits, 0), iter.getNext().getSuperbucket());
}
/**
 * Decreasing the distribution bits by one while a bucket is still active:
 * the iterator yields until the active bucket is updated, after which pending
 * buckets are merged pairwise and the cursor/finished counts are halved.
 */
@Test
public void testRangeDistributionBitDecrease1() throws ParseException {
    int db = 16;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    VisitorIterator.DistributionRangeBucketSource src
            = (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
    // A lossless reset is only possible before any bucket is finished.
    assertTrue(src.isLosslessResetPossible());
    for (int i = 0; i < 3; ++i) {
        iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    }
    assertFalse(src.isLosslessResetPossible());
    // Six pending buckets plus one left active.
    VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[6];
    for (int i = 0; i < 6; ++i) {
        bpp[i] = iter.getNext();
    }
    VisitorIterator.BucketProgress bpa = iter.getNext();
    for (int i = 0; i < 6; ++i) {
        iter.update(bpp[i].getSuperbucket(), new BucketId());
    }
    assertEquals(p.getBucketCursor(), 10);
    iter.setDistributionBitCount(db - 1);
    // Token keeps the old bit count until the active bucket is resolved.
    assertEquals(iter.getDistributionBitCount(), db - 1);
    assertEquals(p.getDistributionBitCount(), db);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), db - 1);
    assertTrue(iter.getBucketSource().shouldYield());
    assertFalse(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(p.getActiveBucketCount(), 1);
    iter.update(bpa.getSuperbucket(), new BucketId());
    // Conversion done: 8 in-flight buckets merged to 4 pending, counts halved.
    assertEquals(p.getDistributionBitCount(), db - 1);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getPendingBucketCount(), 4);
    assertEquals(p.getFinishedBucketCount(), 1);
    assertEquals(p.getBucketCursor(), 5);
}
/**
 * Increasing the distribution bits (16 -> 20) and then decreasing back
 * (20 -> 16) must round-trip the token state exactly: pending count and
 * cursor are scaled up by 16 and then back down.
 */
@Test
public void testRangeDistributionBitIncreaseDecrease() throws ParseException {
    int db = 16;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    VisitorIterator.DistributionRangeBucketSource src
            = (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
    assertTrue(src.isLosslessResetPossible());
    // One finished bucket and four pending buckets.
    iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[4];
    for (int i = 0; i < 4; ++i) {
        bpp[i] = iter.getNext();
    }
    for (int i = 0; i < 4; ++i) {
        iter.update(bpp[i].getSuperbucket(), new BucketId());
    }
    assertFalse(src.isLosslessResetPossible());
    // No active buckets, so the increase converts immediately (no yield).
    iter.setDistributionBitCount(20);
    assertEquals(p.getDistributionBitCount(), 20);
    assertEquals(p.getPendingBucketCount(), 4 << 4);
    assertFalse(iter.getBucketSource().shouldYield());
    assertEquals(p.getBucketCursor(), 5 << 4);
    // And the decrease restores the original state.
    iter.setDistributionBitCount(16);
    assertEquals(p.getDistributionBitCount(), 16);
    assertEquals(p.getPendingBucketCount(), 4);
    assertFalse(iter.getBucketSource().shouldYield());
    assertEquals(p.getBucketCursor(), 5);
}
/**
 * Changes the distribution bit count while active buckets are outstanding and
 * before the previous change has completed (11 -> 9, then -> 12): conversion
 * is deferred until the last active bucket is resolved, and a token
 * serialized mid-change must still deserialize and convert correctly.
 */
@Test
public void testRangeDistributionBitChangeWithoutDone() throws ParseException {
    int db = 11;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    VisitorIterator.DistributionRangeBucketSource src
            = (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
    // Four fetched buckets; only the first two are updated (pending), the
    // other two remain active.
    VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[4];
    for (int i = 0; i < 4; ++i) {
        bpp[i] = iter.getNext();
    }
    for (int i = 0; i < 2; ++i) {
        iter.update(bpp[i].getSuperbucket(), new BucketId());
    }
    assertFalse(src.isLosslessResetPossible());
    // Request a decrease while buckets are active: token unchanged, yield.
    iter.setDistributionBitCount(9);
    assertEquals(p.getDistributionBitCount(), 11);
    assertEquals(p.getActiveBucketCount(), 2);
    assertEquals(p.getPendingBucketCount(), 2);
    assertTrue(iter.getBucketSource().shouldYield());
    iter.update(bpp[3].getSuperbucket(), new BucketId(15, bpp[3].getSuperbucket().getId()));
    // Override with an increase before the decrease ever completed.
    iter.setDistributionBitCount(12);
    assertEquals(p.getActiveBucketCount(), 1);
    assertEquals(p.getPendingBucketCount(), 3);
    assertTrue(iter.getBucketSource().shouldYield());
    // Snapshot the token while the change is still in flight.
    String serialized = p.toString();
    // Resolving the last active bucket completes the 11 -> 12 conversion:
    // remaining pending buckets double.
    iter.update(bpp[2].getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getPendingBucketCount(), 3 * 2);
    assertFalse(iter.getBucketSource().shouldYield());
    assertEquals(p.getFinishedBucketCount(), 2);
    // The mid-change snapshot deserializes at the old bit count and can be
    // converted to 12 bits afterwards; note the unfinished in-flight buckets
    // were written back as pending, so nothing counts as finished.
    ProgressToken p2 = new ProgressToken(serialized);
    assertEquals(p2.getDistributionBitCount(), 11);
    BucketIdFactory idFactory2 = new BucketIdFactory();
    VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory2, 1, p2);
    assertEquals(iter2.getDistributionBitCount(), 11);
    assertEquals(p2.getDistributionBitCount(), 11);
    iter2.setDistributionBitCount(12);
    assertEquals(p2.getDistributionBitCount(), 12);
    assertEquals(p2.getPendingBucketCount(), 8);
    assertEquals(p2.getBucketCursor(), 8);
    assertEquals(p2.getFinishedBucketCount(), 0);
}
@Test
public void testRangeDistributionBitInitialDrop() throws ParseException {
    // Dropping the distribution bit count (31 -> 11) while a bucket is active:
    // the iterator yields until the active bucket is reported back, after
    // which all progress is reset and iteration restarts at 11 bits.
    int db = 31;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    VisitorIterator.BucketProgress[] bp = new VisitorIterator.BucketProgress[3];
    bp[0] = iter.getNext();
    bp[1] = iter.getNext();
    bp[2] = iter.getNext();
    iter.update(bp[2].getSuperbucket(), new BucketId());
    iter.update(bp[1].getSuperbucket(), new BucketId());
    assertEquals(p.getActiveBucketCount(), 1); // bp[0] is still out
    iter.setDistributionBitCount(11);
    assertFalse(iter.hasNext()); // must wait for the active bucket to return
    assertFalse(iter.isDone());
    assertEquals(p.getActiveBucketCount(), 1);
    iter.update(new BucketId(31, 0), new BucketId());
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    // All counters reset: iteration starts over from scratch at 11 bits.
    assertEquals(p.getPendingBucketCount(), 0);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getFinishedBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 0);
    bp[0] = iter.getNext();
    assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
}
@Test
public void testRangeDistributionLosslessReset() throws ParseException {
    // With no buckets finished, a distribution bit change (1 -> 11) resets all
    // progress and restarts iteration at the new bit count once the active
    // bucket is reported back. Verified both for a live token and for one
    // re-imported from a serialized snapshot.
    int db = 1;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    VisitorIterator.DistributionRangeBucketSource src
            = (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
    VisitorIterator.BucketProgress[] bp = new VisitorIterator.BucketProgress[2];
    bp[0] = iter.getNext();
    bp[1] = iter.getNext();
    String serialized = p.toString(); // snapshot with both buckets outstanding
    assertFalse(src.isLosslessResetPossible());
    iter.update(bp[1].getSuperbucket(), new BucketId());
    assertEquals(p.getActiveBucketCount(), 1);
    iter.setDistributionBitCount(11);
    assertFalse(src.isLosslessResetPossible());
    assertEquals(p.getDistributionBitCount(), 1); // change deferred while a bucket is active
    assertFalse(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(p.getActiveBucketCount(), 1);
    iter.update(new BucketId(1, 0), new BucketId());
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    // Everything reset; iteration restarts from bucket 0 at 11 bits.
    assertEquals(p.getPendingBucketCount(), 0);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getFinishedBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 0);
    assertEquals(p.getDistributionBitCount(), 11);
    bp[0] = iter.getNext();
    assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
    // Same reset when importing the earlier snapshot and then changing bits.
    p = new ProgressToken(serialized);
    idFactory = new BucketIdFactory();
    iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 1, p);
    iter.setDistributionBitCount(11);
    assertEquals(p.getPendingBucketCount(), 0);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getFinishedBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 0);
    assertEquals(p.getDistributionBitCount(), 11);
    bp[0] = iter.getNext();
    assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
}
@Test
public void testExplicitDistributionBitIncrease() throws ParseException {
    // Explicit bucket source at 12 bits: finish one of three buckets, raise
    // to 16 bits, and check the finished/pending/total counters carry over.
    final int initialBits = 12;
    BucketIdFactory factory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator it = VisitorIterator.createFromDocumentSelection(
            "id.user == 1234 or id.user == 6789 or id.user == 8009", factory, initialBits, token);
    assertEquals(initialBits, it.getDistributionBitCount());
    assertEquals(initialBits, token.getDistributionBitCount());
    assertEquals(initialBits, it.getBucketSource().getDistributionBitCount());
    it.update(it.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    it.setDistributionBitCount(16);
    assertEquals(16, it.getDistributionBitCount());
    assertEquals(16, token.getDistributionBitCount());
    assertEquals(16, it.getBucketSource().getDistributionBitCount());
    // Explicit buckets are never split or merged, so the counts are unchanged.
    assertEquals(2, token.getPendingBucketCount());
    assertEquals(1, token.getFinishedBucketCount());
    assertEquals(3, token.getTotalBucketCount());
}
@Test
public void testExplicitDistributionBitDecrease() throws ParseException {
    // Explicit bucket source at 20 bits: finish one of three buckets, lower
    // to 16 bits, and check the finished/pending/total counters carry over.
    final int initialBits = 20;
    BucketIdFactory factory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator it = VisitorIterator.createFromDocumentSelection(
            "id.user == 1234 or id.user == 6789 or id.user == 8009", factory, initialBits, token);
    assertEquals(initialBits, it.getDistributionBitCount());
    assertEquals(initialBits, token.getDistributionBitCount());
    assertEquals(initialBits, it.getBucketSource().getDistributionBitCount());
    it.update(it.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    it.setDistributionBitCount(16);
    assertEquals(16, it.getDistributionBitCount());
    assertEquals(16, token.getDistributionBitCount());
    assertEquals(16, it.getBucketSource().getDistributionBitCount());
    // Explicit buckets are never split or merged, so the counts are unchanged.
    assertEquals(2, token.getPendingBucketCount());
    assertEquals(1, token.getFinishedBucketCount());
    assertEquals(3, token.getTotalBucketCount());
}
@Test
public void testExplicitDistributionImportNoTruncation() throws ParseException {
    // An explicit-source token serialized at 20 distribution bits must keep
    // those 20 bits when imported into an iterator created with fewer bits.
    BucketIdFactory factory = new BucketIdFactory();
    ProgressToken source = new ProgressToken();
    VisitorIterator sourceIter = VisitorIterator.createFromDocumentSelection(
            "id.user == 1234 or id.user == 6789 or id.user == 8009", factory, 20, source);
    assertEquals(20, sourceIter.getDistributionBitCount());
    assertEquals(20, source.getDistributionBitCount());
    assertEquals(20, sourceIter.getBucketSource().getDistributionBitCount());
    sourceIter.update(sourceIter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    // Round-trip through the textual form into a fresh iterator at 1 bit.
    ProgressToken imported = new ProgressToken(source.toString());
    BucketIdFactory importFactory = new BucketIdFactory();
    VisitorIterator importedIter = VisitorIterator.createFromDocumentSelection(
            "id.user == 1234 or id.user == 6789 or id.user == 8009", importFactory, 1, imported);
    // The imported token's 20 bits win over the iterator's 1 bit.
    assertEquals(20, importedIter.getDistributionBitCount());
    assertEquals(20, imported.getDistributionBitCount());
    assertEquals(20, importedIter.getBucketSource().getDistributionBitCount());
    assertEquals(2, imported.getPendingBucketCount());
    assertEquals(1, imported.getFinishedBucketCount());
    assertEquals(3, imported.getTotalBucketCount());
}
@Test
public void testImportProgressWithOutdatedDistribution() throws ParseException {
    // Import a 10-bit progress file, then raise the iterator to 12 bits and
    // verify every counter is scaled by the 2-bit difference (x4). Also check
    // the rescaled token survives a serialize/deserialize round trip.
    // (Removed an unused local `db` that shadowed the literal 12 used below.)
    String input = "VDS bucket progress file\n" +
            "10\n" +
            "503\n" +
            "500\n" +
            "1024\n" +
            "28000000000000be:0\n" +
            "28000000000002be:0\n" +
            "28000000000001be:0\n";
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken(input);
    assertEquals(10, p.getDistributionBitCount());
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 1, p);
    iter.setDistributionBitCount(12);
    assertEquals(iter.getDistributionBitCount(), 12);
    assertEquals(p.getDistributionBitCount(), 12);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), 12);
    // All counts scaled up by 1 << 2 relative to the imported file.
    assertEquals(p.getTotalBucketCount(), 1 << 12);
    assertEquals(p.getFinishedBucketCount(), 500 << 2);
    assertEquals(p.getPendingBucketCount(), 3 << 2);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 503 << 2);
    assertTrue(iter.hasNext());
    // Binary round trip preserves the rescaled state.
    ProgressToken p2 = new ProgressToken(p.serialize());
    assertEquals(p2.getDistributionBitCount(), 12);
    assertEquals(p2.getTotalBucketCount(), 1 << 12);
    assertEquals(p2.getFinishedBucketCount(), 500 << 2);
    assertEquals(p2.getPendingBucketCount(), 3 << 2);
    assertEquals(p2.getActiveBucketCount(), 0);
    assertEquals(p2.getBucketCursor(), 503 << 2);
}
@Test
public void testImportInconsistentProgressIncrease() throws ParseException {
    // Import a 7-bit progress file whose cursor (32) is inconsistent with its
    // finished count (24) plus pending buckets. Creating the iterator
    // normalizes the state, and raising the bit count to 8 then doubles every
    // counter. (Removed an unused local `db` — the literal 8 is used below.)
    String input = "VDS bucket progress file\n" +
            "7\n" +
            "32\n" +
            "24\n" +
            "128\n" +
            "100000000000000c:0\n";
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken(input);
    assertEquals(7, p.getDistributionBitCount());
    assertEquals(p.getTotalBucketCount(), 1 << 7);
    assertEquals(p.getFinishedBucketCount(), 24);
    assertEquals(p.getPendingBucketCount(), 1);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(32, p.getBucketCursor());
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 1, p);
    // Iterator creation resolves the inconsistency: the single pending bucket
    // becomes 8 pending buckets and the cursor is recomputed.
    assertEquals(7, p.getDistributionBitCount());
    assertEquals(p.getPendingBucketCount(), 1 << 3);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(24 + (1 << 3), p.getBucketCursor());
    iter.setDistributionBitCount(8);
    assertEquals(iter.getDistributionBitCount(), 8);
    assertEquals(p.getDistributionBitCount(), 8);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), 8);
    // One extra distribution bit doubles every count.
    assertEquals(p.getTotalBucketCount(), 1 << 8);
    assertEquals(p.getFinishedBucketCount(), 24 << 1);
    assertEquals(p.getPendingBucketCount(), 1 << 4);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 24*2 + (1 << 4));
    assertTrue(iter.hasNext());
}
@Test
public void testImportInconsistentProgressDecrease() throws ParseException {
    // Import a 7-bit progress file whose cursor (32) is inconsistent with its
    // finished count (24), then lower the distribution bit count to 6 and
    // verify all counters are merged down (halved) by the 1-bit difference.
    String input = "VDS bucket progress file\n" +
            "7\n" +
            "32\n" +
            "24\n" +
            "128\n" +
            "100000000000000c:0\n";
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken(input);
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 1, p);
    assertEquals(iter.getDistributionBitCount(), 7); // imported token's bits win
    iter.setDistributionBitCount(6);
    assertEquals(iter.getDistributionBitCount(), 6);
    assertEquals(p.getDistributionBitCount(), 6);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), 6);
    assertEquals(p.getTotalBucketCount(), 1 << 6);
    assertEquals(p.getFinishedBucketCount(), 24 >> 1);
    assertEquals(p.getPendingBucketCount(), 1 << 2);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 24/2 + (1 << 2));
    assertTrue(iter.hasNext());
}
@Test
public void testEntireBucketSpaceCovered() throws ParseException {
    // Start at 4 distribution bits, report sub-bucket progress on 3 buckets,
    // then raise to 7 bits; the remaining iteration must cover every 7-bit
    // bucket exactly once (no gaps, no duplicates).
    int db = 4;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[3];
    for (int i = 0; i < 3; ++i) {
        bpp[i] = iter.getNext();
    }
    for (int i = 0; i < 3; ++i) {
        iter.update(bpp[i].getSuperbucket(),
                new BucketId(db + 1, bpp[i].getSuperbucket().getId()));
    }
    // Build the set of all buckets expected at the new bit count.
    Set<BucketId> buckets = new TreeSet<BucketId>();
    db = 7;
    for (int i = 0; i < (1 << db); ++i) {
        buckets.add(new BucketId(db, i));
    }
    iter.setDistributionBitCount(db);
    assertEquals(p.getFinishedBucketCount(), 0);
    assertEquals(p.getPendingBucketCount(), 3 << 3); // 3 buckets split from 4 to 7 bits
    while (iter.hasNext()) {
        VisitorIterator.BucketProgress bp = iter.getNext();
        assertTrue(buckets.contains(bp.getSuperbucket())); // no duplicates
        buckets.remove(bp.getSuperbucket());
    }
    assertTrue(buckets.isEmpty()); // no gaps: every bucket was visited
}
@Test
public void testExceptionOnWrongDocumentSelection() throws ParseException {
    // Importing progress created for one kind of document selection into an
    // iterator built from an incompatible selection must be rejected.
    BucketIdFactory idFactory = new BucketIdFactory();
    // Explicit-bucket progress fed to a range (inverted) selection.
    boolean caught = false;
    try {
        ProgressToken p = new ProgressToken("VDS bucket progress file\n16\n3\n1\n3\n"
                + "8000000000001f49:0\n8000000000001a85:0\n");
        VisitorIterator.createFromDocumentSelection("id.group != \"yahoo.com\"", idFactory, 16, p);
    } catch (IllegalArgumentException expected) {
        caught = true;
    }
    assertTrue(caught);
    // Range progress fed to an explicit (equality) selection.
    caught = false;
    try {
        ProgressToken p = new ProgressToken("VDS bucket progress file\n" +
                "10\n" +
                "503\n" +
                "500\n" +
                "1024\n" +
                "28000000000000be:0\n" +
                "28000000000002be:0\n" +
                "28000000000001be:0\n");
        VisitorIterator.createFromDocumentSelection("id.group=\"yahoo.com\" or id.user=555", idFactory, 16, p);
    } catch (IllegalArgumentException expected) {
        caught = true;
    }
    assertTrue(caught);
}
@Test
public void testIsBucketFinished() throws ParseException {
    // isBucketFinished() must answer, for any 32-bit bucket, whether the
    // superbucket containing it has been finished.
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 4, p);
    assertFalse(p.isBucketFinished(new BucketId(32, 0)));
    iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertTrue(p.isBucketFinished(new BucketId(32, 0)));
    assertFalse(p.isBucketFinished(new BucketId(32, 1 << 3)));
    VisitorIterator.BucketProgress bp = iter.getNext();
    assertFalse(p.isBucketFinished(new BucketId(32, 1 << 3))); // active is not finished
    iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertTrue(p.isBucketFinished(new BucketId(32, 1 << 3)));
    // Only the low (distribution) bits decide superbucket membership; the
    // high bits of the 32-bit id are irrelevant.
    assertTrue(p.isBucketFinished(new BucketId(32, 0x12345670)));
    assertTrue(p.isBucketFinished(new BucketId(32, 0x12345678)));
    assertFalse(p.isBucketFinished(new BucketId(32, 0x12345671)));
    assertFalse(p.isBucketFinished(new BucketId(32, 0x12345679)));
}
@Test
public void testInconsistentState() throws ParseException {
    // The token reports an inconsistent state from the moment a distribution
    // bit change is requested with buckets still active until the last bucket
    // held at the old bit count has been reported back.
    int db = 16;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    for (int i = 0; i < 3; ++i) {
        iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    }
    VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[2];
    bpp[0] = iter.getNext();
    bpp[1] = iter.getNext();
    VisitorIterator.BucketProgress bpa = iter.getNext(); // this one stays active
    iter.update(bpp[0].getSuperbucket(), new BucketId());
    iter.update(bpp[1].getSuperbucket(), new BucketId());
    assertFalse(p.isInconsistentState());
    iter.setDistributionBitCount(20);
    assertTrue(p.isInconsistentState()); // bpa is still out at the old bit count
    iter.update(bpa.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertFalse(p.isInconsistentState());
}
@Test
public void testMalformedProgressFile() {
    // A bucket entry missing its progress value ("...:") must be rejected.
    boolean rejected = false;
    try {
        new ProgressToken("VDS bucket progress file\n" +
                "10\n" +
                "503\n" +
                "500\n" +
                "1024\n" +
                "28000000000000be:0\n" +
                "28000000000002be:");
    } catch (IllegalArgumentException expected) {
        rejected = true;
    }
    assertTrue(rejected);
}
@Test
public void testFailOnTooFewLinesInFile() {
    // A progress file truncated after only some header lines must be rejected.
    boolean rejected = false;
    try {
        new ProgressToken("VDS bucket progress file\n" +
                "10\n" +
                "503\n");
    } catch (IllegalArgumentException expected) {
        rejected = true;
    }
    assertTrue(rejected);
}
@Test
public void testUnknownFirstHeaderLine() {
    // A file whose first line is not the expected magic header must be rejected.
    boolean rejected = false;
    try {
        new ProgressToken("Smurf Time 3000\n" +
                "10\n" +
                "503\n" +
                "500\n" +
                "1024\n" +
                "28000000000000be:0\n" +
                "28000000000002be:0");
    } catch (IllegalArgumentException expected) {
        rejected = true;
    }
    assertTrue(rejected);
}
@Test
public void testBinaryProgressSerialization() {
    // Text -> token -> binary -> token -> text must be a lossless round trip.
    String textForm = "VDS bucket progress file (48.828125% completed)\n" +
            "10\n" +
            "503\n" +
            "500\n" +
            "1024\n" +
            "28000000000000be:0\n" +
            "28000000000002be:0\n" +
            "28000000000001be:0\n";
    ProgressToken original = new ProgressToken(textForm);
    byte[] binaryForm = original.serialize();
    ProgressToken restored = new ProgressToken(binaryForm);
    assertEquals(textForm, restored.toString());
}
} | class VisitorIteratorTestCase {
@Test
public void testIterationSingleBucketUpdate() throws ParseException {
    // Single explicit bucket (id.user = 1234): fetch it, report sub-bucket
    // progress, re-fetch to resume from that progress, then finish it and
    // verify the terminal state of both iterator and token.
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.user = 1234", idFactory, 1, progress);
    assertFalse(progress.hasActive());
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getFinishedBucketCount(), 0);
    assertEquals(progress.getTotalBucketCount(), 1);
    assertFalse(iter.isDone());
    assertTrue(iter.hasNext());
    assertEquals(iter.getRemainingBucketCount(), 1);
    VisitorIterator.BucketProgress b1 = iter.getNext();
    assertEquals(b1.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(b1.getProgress(), new BucketId());
    assertFalse(iter.hasNext()); // the only bucket is now active
    assertFalse(iter.isDone());
    assertEquals(iter.getRemainingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 1);
    assertFalse(progress.hasPending());
    // Report partial progress: the bucket moves from active back to pending.
    BucketId sub = new BucketId(b1.getSuperbucket().getUsedBits() + 1, b1.getSuperbucket().getId());
    iter.update(b1.getSuperbucket(), sub);
    assertFalse(progress.hasActive());
    assertEquals(progress.getPendingBucketCount(), 1);
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(iter.getRemainingBucketCount(), 1);
    VisitorIterator.BucketProgress b2 = iter.getNext();
    assertEquals(b2.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(b2.getProgress(), new BucketId(33, 1234)); // resumes at the sub-bucket
    assertFalse(iter.hasNext());
    assertEquals(progress.getActiveBucketCount(), 1);
    assertFalse(progress.hasPending());
    // Finish the bucket: iteration is done and the token reports completion.
    iter.update(b1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertFalse(progress.hasActive());
    assertFalse(progress.hasPending());
    assertFalse(iter.hasNext());
    assertTrue(iter.isDone());
    assertTrue(progress.isFinished());
    assertEquals(progress.getFinishedBucketCount(), 1);
    assertEquals(iter.getRemainingBucketCount(), 0);
}
@Test
public void testInvalidSlicing() throws ParseException {
    // Invalid (slices, sliceId) combinations must be rejected with a
    // descriptive IllegalArgumentException. The previous version only checked
    // the message *if* an exception happened to be thrown; it now also fails
    // when no exception is thrown at all.
    int distBits = 4;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    boolean caught = false;
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 0, 0);
    } catch (IllegalArgumentException e) {
        assertEquals("slices must be positive, but was 0", e.getMessage());
        caught = true;
    }
    assertTrue(caught); // zero slices rejected
    caught = false;
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 1, 1);
    } catch (IllegalArgumentException e) {
        assertEquals("sliceId must be in [0, 1), but was 1", e.getMessage());
        caught = true;
    }
    assertTrue(caught); // sliceId == slices rejected
    caught = false;
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 1, -1);
    } catch (IllegalArgumentException e) {
        assertEquals("sliceId must be in [0, 1), but was -1", e.getMessage());
        caught = true;
    }
    assertTrue(caught); // negative sliceId rejected
}
/**
 * Every valid (slices, sliceId) pair must partition the bucket space: each
 * slice visits a disjoint subset of the buckets, and across all slices every
 * bucket is visited exactly once.
 */
// Fix: the method carried a duplicate @Test annotation, which is a compile
// error (org.junit.Test is not a repeatable annotation).
@Test
public void testValidSlicing() throws ParseException {
    int distBits = 4;
    long buckets = 1 << distBits;
    BucketIdFactory idFactory = new BucketIdFactory();
    for (int slices = 1; slices <= 2 * buckets; slices++) {
        long bucketsTotal = 0;
        for (int sliceId = 0; sliceId < slices; sliceId++) {
            ProgressToken progress = new ProgressToken();
            VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
                    "id.group != \"yahoo.com\"", idFactory, distBits, progress, slices, sliceId);
            String context = "slices: " + slices + ", sliceId: " + sliceId;
            assertEquals(context, progress.getDistributionBitCount(), distBits);
            assertTrue(context, iter.getBucketSource() instanceof VisitorIterator.DistributionRangeBucketSource);
            assertEquals(context, progress.getFinishedBucketCount(), Math.min(buckets, sliceId));
            assertEquals(context, progress.getTotalBucketCount(), buckets);
            long bucketCount = 0;
            // Finish buckets until half the bucket space is marked finished.
            while (iter.hasNext() && progress.getFinishedBucketCount() < buckets / 2) {
                VisitorIterator.BucketProgress ids = iter.getNext();
                iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
                ++bucketCount;
                ++bucketsTotal;
            }
            if (slices + sliceId < buckets) {
                assertEquals(context, ((buckets / 2) + slices - sliceId - 1) / slices, bucketCount);
                assertFalse(context, progress.hasActive());
                assertFalse(context, progress.hasPending());
                assertFalse(context, iter.isDone());
                assertTrue(context, iter.hasNext());
                assertEquals(context, progress.getFinishedBucketCount(), bucketCount * slices + sliceId);
                assertFalse(context, progress.isFinished());
            }
            // Drain the remainder of this slice.
            while (iter.hasNext()) {
                VisitorIterator.BucketProgress ids = iter.getNext();
                iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
                ++bucketCount;
                ++bucketsTotal;
            }
            assertEquals(context, (buckets + slices - sliceId - 1) / slices, bucketCount);
            assertFalse(context, progress.hasActive());
            assertFalse(context, progress.hasPending());
            assertTrue(context, iter.isDone());
            assertFalse(context, iter.hasNext());
            assertEquals(context, progress.getFinishedBucketCount(), buckets);
            assertTrue(context, progress.isFinished());
        }
        // All slices together cover the bucket space exactly once.
        assertEquals("slices: " + slices, buckets, bucketsTotal);
    }
}
@Test
public void testProgressSerializationRange() throws ParseException {
    // Full serialization lifecycle for a range (whole bucket space) source:
    // 50% done -> serialize/re-import, then with pending+active buckets
    // outstanding -> serialize/re-import, then run to 100% completion.
    int distBits = 4;
    int buckets = 1 << distBits;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, distBits, progress);
    assertEquals(progress.getDistributionBitCount(), distBits);
    assertTrue(iter.getBucketSource() instanceof VisitorIterator.DistributionRangeBucketSource);
    assertEquals(progress.getFinishedBucketCount(), 0);
    assertEquals(progress.getTotalBucketCount(), buckets);
    // Finish exactly half the buckets.
    long bucketCount = 0;
    long bucketStop = buckets / 2;
    while (iter.hasNext() && bucketCount != bucketStop) {
        VisitorIterator.BucketProgress ids = iter.getNext();
        iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
        ++bucketCount;
    }
    assertEquals(bucketCount, bucketStop);
    assertFalse(progress.hasActive());
    assertFalse(progress.hasPending());
    assertFalse(iter.isDone());
    assertTrue(iter.hasNext());
    assertEquals(progress.getFinishedBucketCount(), bucketCount);
    assertFalse(progress.isFinished());
    // Expected textual form: header, dist bits, cursor, finished, total.
    StringBuilder desired = new StringBuilder();
    desired.append("VDS bucket progress file (50.0% completed)\n");
    desired.append(distBits);
    desired.append('\n');
    desired.append(bucketCount);
    desired.append('\n');
    desired.append(bucketCount);
    desired.append('\n');
    desired.append(buckets);
    desired.append('\n');
    assertEquals(desired.toString(), progress.toString());
    BucketIdFactory idFactory2 = new BucketIdFactory();
    {
        // Re-import the 50% snapshot and check iteration resumes at the
        // first unfinished bucket.
        ProgressToken progDs = new ProgressToken(progress.toString());
        assertEquals(progDs.getDistributionBitCount(), distBits);
        assertEquals(progDs.getTotalBucketCount(), buckets);
        assertEquals(progDs.getFinishedBucketCount(), bucketCount);
        VisitorIterator iterDs = VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory2, 1, progDs);
        assertFalse(progDs.hasPending());
        assertFalse(progDs.hasActive());
        assertTrue(iterDs.hasNext());
        assertFalse(iterDs.isDone());
        assertEquals(distBits, iterDs.getDistributionBitCount());
        assertEquals(distBits, progDs.getDistributionBitCount());
        VisitorIterator.BucketProgress idDs = iterDs.getNext();
        long resumeKey = ProgressToken.makeNthBucketKey(bucketCount, distBits);
        assertEquals(idDs.getSuperbucket(), new BucketId(ProgressToken.keyToBucketId(resumeKey)));
        assertEquals(idDs.getProgress(), new BucketId());
    }
    // Fetch more buckets; report sub-bucket progress on the first half (they
    // go back to pending) and leave the second half active.
    int pendingTotal = buckets / 8;
    int activeTotal = buckets / 8;
    Vector<VisitorIterator.BucketProgress> trackedBuckets = new Vector<VisitorIterator.BucketProgress>();
    for (int i = 0; i < pendingTotal + activeTotal; ++i) {
        trackedBuckets.add(iter.getNext());
    }
    for (int i = 0; i < pendingTotal + activeTotal; ++i) {
        VisitorIterator.BucketProgress idTemp = trackedBuckets.get(i);
        if (i < activeTotal) {
            iter.update(idTemp.getSuperbucket(),
                    new BucketId(distBits + 2, idTemp.getSuperbucket().getId() | (2 * buckets)));
        }
    }
    assertEquals(progress.getActiveBucketCount(), activeTotal);
    assertEquals(progress.getPendingBucketCount(), pendingTotal);
    // Expected form now includes one "bucket:progress" line per tracked bucket.
    desired = new StringBuilder();
    desired.append("VDS bucket progress file (").append(progress.percentFinished()).append("% completed)\n");
    desired.append(distBits);
    desired.append('\n');
    desired.append(bucketCount + pendingTotal + activeTotal);
    desired.append('\n');
    desired.append(bucketCount);
    desired.append('\n');
    desired.append(buckets);
    desired.append('\n');
    assertEquals(progress.getBuckets().entrySet().size(), pendingTotal + activeTotal);
    for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
            : progress.getBuckets().entrySet()) {
        desired.append(Long.toHexString(ProgressToken.keyToBucketId(entry.getKey().getKey())));
        desired.append(':');
        desired.append(Long.toHexString(entry.getValue().getProgress().getRawId()));
        desired.append('\n');
    }
    assertEquals(progress.toString(), desired.toString());
    {
        // Re-import the mid-flight snapshot: formerly active buckets come
        // back as pending.
        ProgressToken progDs = new ProgressToken(progress.toString());
        assertEquals(progDs.getDistributionBitCount(), distBits);
        assertEquals(progDs.getTotalBucketCount(), buckets);
        assertEquals(progDs.getFinishedBucketCount(), bucketCount);
        VisitorIterator iterDs = VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory2, 1, progDs);
        assertEquals(progDs.getPendingBucketCount(), pendingTotal + activeTotal);
        assertEquals(distBits, progDs.getDistributionBitCount());
        assertEquals(distBits, iterDs.getDistributionBitCount());
        assertFalse(progDs.hasActive());
        assertTrue(iterDs.hasNext());
        assertFalse(iterDs.isDone());
        assertEquals(progDs.getBucketCursor(), bucketCount + pendingTotal + activeTotal);
    }
    // Finish the still-active tracked buckets directly.
    for (int i = activeTotal; i < activeTotal + pendingTotal; ++i) {
        iter.update(trackedBuckets.get(i).getSuperbucket(), ProgressToken.FINISHED_BUCKET);
        ++bucketCount;
    }
    assertEquals(progress.getActiveBucketCount(), 0);
    // Drain the iterator; hasNext() must stay true until isDone().
    boolean consistentNext = true;
    while (!iter.isDone()) {
        if (!iter.hasNext()) {
            consistentNext = false;
            break;
        }
        VisitorIterator.BucketProgress bp = iter.getNext();
        iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
        ++bucketCount;
    }
    assertTrue(consistentNext);
    assertFalse(iter.hasNext());
    assertTrue(progress.isFinished());
    assertEquals(bucketCount, buckets);
    // Fully finished token serializes with cursor == finished == total.
    StringBuilder finished = new StringBuilder();
    finished.append("VDS bucket progress file (100.0% completed)\n");
    finished.append(distBits);
    finished.append('\n');
    finished.append(buckets);
    finished.append('\n');
    finished.append(buckets);
    finished.append('\n');
    finished.append(buckets);
    finished.append('\n');
    assertEquals(progress.toString(), finished.toString());
}
@Test
public void testProgressSerializationExplicit() throws ParseException {
    // Serialization lifecycle for an explicit (id.user ==) bucket source:
    // partial progress, serialize/re-import mid-flight, then the fully
    // finished form.
    int distBits = 16;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, progress);
    assertEquals(progress.getDistributionBitCount(), distBits);
    assertTrue(iter.getBucketSource() instanceof VisitorIterator.ExplicitBucketSource);
    assertEquals(progress.getFinishedBucketCount(), 0);
    assertEquals(progress.getTotalBucketCount(), 3);
    assertEquals(progress.getPendingBucketCount(), 3);
    VisitorIterator.BucketProgress bp1 = iter.getNext();
    VisitorIterator.BucketProgress bp2 = iter.getNext();
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 2);
    assertEquals(bp1.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(bp1.getProgress(), new BucketId());
    // Report partial progress for user 1234; it goes back to pending.
    iter.update(bp1.getSuperbucket(), new BucketId(36, 1234));
    assertEquals(progress.getPendingBucketCount(), 2);
    assertEquals(bp2.getSuperbucket(), new BucketId(32, 8009));
    assertEquals(bp2.getProgress(), new BucketId());
    {
        // Expected textual form lists each unfinished bucket with its progress.
        StringBuilder desired = new StringBuilder();
        desired.append("VDS bucket progress file (").append(progress.percentFinished()).append("% completed)\n");
        desired.append(distBits);
        desired.append('\n');
        desired.append(0);
        desired.append('\n');
        desired.append(0);
        desired.append('\n');
        desired.append(3);
        desired.append('\n');
        desired.append(Long.toHexString(new BucketId(32, 1234).getRawId()));
        desired.append(':');
        desired.append(Long.toHexString(new BucketId(36, 1234).getRawId()));
        desired.append('\n');
        desired.append(Long.toHexString(new BucketId(32, 8009).getRawId()));
        desired.append(":0\n");
        desired.append(Long.toHexString(new BucketId(32, 6789).getRawId()));
        desired.append(":0\n");
        assertEquals(desired.toString(), progress.toString());
        // Re-import and walk through all three buckets in serialized order.
        ProgressToken prog2 = new ProgressToken(progress.toString());
        assertEquals(prog2.getDistributionBitCount(), distBits);
        assertEquals(prog2.getTotalBucketCount(), 3);
        assertEquals(prog2.getFinishedBucketCount(), 0);
        VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
                "id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, prog2);
        assertEquals(prog2.getPendingBucketCount(), 3);
        assertFalse(prog2.hasActive());
        assertTrue(iter2.hasNext());
        assertFalse(iter2.isDone());
        assertTrue(iter2.getBucketSource() instanceof VisitorIterator.ExplicitBucketSource);
        assertFalse(iter2.getBucketSource().hasNext()); // all buckets came from the token
        VisitorIterator.BucketProgress bp = iter2.getNext();
        assertEquals(bp.getSuperbucket(), new BucketId(32, 1234));
        assertEquals(bp.getProgress(), new BucketId(36, 1234)); // partial progress kept
        assertEquals(prog2.getPendingBucketCount(), 2);
        assertTrue(iter2.hasNext());
        assertFalse(iter2.isDone());
        bp = iter2.getNext();
        assertEquals(bp.getSuperbucket(), new BucketId(32, 8009));
        assertEquals(bp.getProgress(), new BucketId());
        assertEquals(prog2.getPendingBucketCount(), 1);
        assertTrue(iter2.hasNext());
        assertFalse(iter2.isDone());
        bp = iter2.getNext();
        assertEquals(prog2.getPendingBucketCount(), 0);
        assertEquals(bp.getSuperbucket(), new BucketId(32, 6789));
        assertEquals(bp.getProgress(), new BucketId());
        assertFalse(iter2.hasNext());
        assertFalse(iter2.isDone());
        assertEquals(prog2.getActiveBucketCount(), 3);
    }
    // Back on the original iterator: re-fetch 1234 (resumes at its progress),
    // finish it, then fetch 6789.
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    bp1 = iter.getNext();
    assertEquals(bp1.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(bp1.getProgress(), new BucketId(36, 1234));
    iter.update(bp1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    bp1 = iter.getNext();
    assertEquals(bp1.getSuperbucket(), new BucketId(32, 6789));
    assertEquals(bp1.getProgress(), new BucketId());
    assertEquals(
            progress.toString(),
            "VDS bucket progress file (" + progress.percentFinished() + "% completed)\n" +
            "16\n" +
            "0\n" +
            "1\n" +
            "3\n" +
            "8000000000001f49:0\n" +
            "8000000000001a85:0\n");
    iter.update(bp1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertFalse(iter.hasNext());
    assertFalse(iter.isDone()); // bp2 (user 8009) is still active
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
    iter.update(bp2.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertFalse(iter.hasNext());
    assertTrue(iter.isDone());
    assertTrue(progress.isFinished());
    assertEquals(progress.getActiveBucketCount(), 0);
    {
        // Fully finished explicit token: cursor 0, finished == total == 3.
        StringBuilder finished = new StringBuilder();
        finished.append("VDS bucket progress file (100.0% completed)\n");
        finished.append(distBits);
        finished.append('\n');
        finished.append(0);
        finished.append('\n');
        finished.append(3);
        finished.append('\n');
        finished.append(3);
        finished.append('\n');
        assertEquals(finished.toString(), progress.toString());
    }
}
/**
 * Test that doing update() on a bucket several times in a row (without
 * re-fetching from getNext first) works: the latest sub-bucket progress
 * overwrites the earlier one, and the bucket stays in pending (not active)
 * between the updates.
 */
@Test
public void testActiveUpdate() throws ParseException {
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group = \"yahoo.com\"", idFactory, 16, progress);
    VisitorIterator.BucketProgress bp = iter.getNext();
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
    BucketId superbucket = bp.getSuperbucket();
    int usedBits = superbucket.getUsedBits();
    // First update: active -> pending with sub-bucket progress.
    iter.update(superbucket, new BucketId(usedBits + 2, superbucket.getId() | (2L << usedBits)));
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 0);
    // Second update without an intervening getNext(): overwrites the progress.
    iter.update(superbucket, new BucketId(usedBits + 2, superbucket.getId() | (1L << usedBits)));
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 0);
    bp = iter.getNext();
    assertEquals(bp.getSuperbucket(), superbucket);
    // The progress returned is the one from the most recent update.
    assertEquals(bp.getProgress(), new BucketId(usedBits + 2, superbucket.getId() | (1L << usedBits)));
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
}
/**
 * Test that ensures doing update(superbucket, NULL_BUCKET) — and likewise
 * update(superbucket, superbucket) — simply puts the bucket back in pending,
 * and that re-fetching it returns exactly the progress bucket that was given.
 */
@Test
public void testNullAndSuperUpdate() throws ParseException {
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group = \"yahoo.com\"", idFactory, 16, progress);
    assertEquals(progress.getPendingBucketCount(), 1);
    VisitorIterator.BucketProgress bp = iter.getNext();
    assertEquals(bp.getProgress(), new BucketId());
    BucketId superbucket = bp.getSuperbucket();
    // The single bucket is now active, so nothing more can be fetched yet.
    assertFalse(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
    // Updating with the null bucket puts the superbucket back in pending...
    iter.update(superbucket, ProgressToken.NULL_BUCKET);
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 0);
    // ...and re-fetching it yields the null bucket as its progress.
    VisitorIterator.BucketProgress bp2 = iter.getNext();
    assertEquals(bp2.getSuperbucket(), superbucket);
    assertEquals(bp2.getProgress(), ProgressToken.NULL_BUCKET);
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
    // Updating with the superbucket itself as progress behaves the same way.
    iter.update(superbucket, superbucket);
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 0);
    bp2 = iter.getNext();
    assertEquals(bp2.getSuperbucket(), superbucket);
    assertEquals(bp2.getProgress(), superbucket);
    assertEquals(progress.getPendingBucketCount(), 0);
    assertEquals(progress.getActiveBucketCount(), 1);
}
@Test
public void testDeserializedFinishedProgress() {
    // A progress file where the cursor, finished count and total count are
    // all 2^17 denotes a fully completed run over 17 distribution bits.
    final long total = 1L << 17;
    String serialized = "VDS bucket progress file\n"
            + 17 + '\n'
            + total + '\n'
            + total + '\n'
            + total + '\n';
    ProgressToken token = new ProgressToken(serialized);
    assertEquals(token.getDistributionBitCount(), 17);
    assertEquals(token.getTotalBucketCount(), total);
    assertEquals(token.getFinishedBucketCount(), total);
    assertEquals(token.getBucketCursor(), total);
    assertTrue(token.isFinished());

    // Serializing and re-parsing must preserve the finished state.
    ProgressToken roundTripped = new ProgressToken(token.serialize());
    assertEquals(17, roundTripped.getDistributionBitCount());
    assertEquals(total, roundTripped.getTotalBucketCount());
    assertEquals(total, roundTripped.getFinishedBucketCount());
    assertEquals(total, roundTripped.getBucketCursor());
    assertTrue(roundTripped.isFinished());
}
@Test
public void testBucketProgressFraction() {
    final double eps = 0.00001;
    ProgressToken token = new ProgressToken(16);
    BucketId superbucket = new BucketId(16, 1234);

    // Sub-buckets of the 16-bit superbucket 1234 at increasing used-bits
    // depths, chosen to land at 25/50/75/100% of its key space.
    BucketId none = new BucketId();
    BucketId all0 = new BucketId(16, 1234);
    BucketId half1 = new BucketId(17, 1234);
    BucketId all1 = new BucketId(17, 1234 | (1 << 16));
    BucketId quarter2 = new BucketId(18, 1234);
    BucketId half2 = new BucketId(18, 1234 | (2 << 16));
    BucketId threeQuarters2 = new BucketId(18, 1234 | (1 << 16));
    BucketId all2 = new BucketId(18, 1234 | (3 << 16));

    assertEquals(token.progressFraction(new BucketId(32, 1234), none), 0.0, eps);
    assertEquals(token.progressFraction(superbucket, all0), 1.0, eps);
    assertEquals(token.progressFraction(superbucket, half1), 0.5, eps);
    assertEquals(token.progressFraction(superbucket, all1), 1.0, eps);
    assertEquals(token.progressFraction(superbucket, quarter2), 0.25, eps);
    assertEquals(token.progressFraction(superbucket, half2), 0.5, eps);
    assertEquals(token.progressFraction(superbucket, threeQuarters2), 0.75, eps);
    assertEquals(token.progressFraction(superbucket, all2), 1.0, eps);
    // High-bit bucket keys must also report full completion here.
    assertEquals(token.progressFraction(new BucketId(0x8000000000000000L),
                                        new BucketId(0xb0000fff00000000L)), 1.0, eps);
}
/**
 * Tests that percentFinished() produces sane estimates as buckets complete.
 * With 4 distribution bits there are 16 superbuckets, so each fully finished
 * superbucket contributes 6.25%; partially completed buckets contribute a
 * fraction without increasing getFinishedBucketCount().
 */
@Test
public void testProgressEstimation() throws ParseException {
int distBits = 4;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, distBits, progress);
assertEquals(progress.getDistributionBitCount(), 4);
double epsilon = 0.00001;
assertEquals(progress.percentFinished(), 0, epsilon);
VisitorIterator.BucketProgress bp = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
// 1 of 16 superbuckets finished == 6.25%
assertEquals(progress.percentFinished(), 6.25, epsilon);
assertEquals(progress.getFinishedBucketCount(), 1);
bp = iter.getNext();
VisitorIterator.BucketProgress bp3 = iter.getNext();
VisitorIterator.BucketProgress bp4 = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertEquals(progress.percentFinished(), 12.5, epsilon);
assertEquals(progress.getFinishedBucketCount(), 2);
// Partial progress (distBits + 2 used bits) adds a fraction of a bucket's
// 6.25% to the estimate, but not to the finished bucket count
iter.update(bp3.getSuperbucket(), new BucketId(distBits + 2, bp3.getSuperbucket().getId() | (1 << distBits)));
assertEquals(progress.percentFinished(), 17.1875, epsilon);
assertEquals(progress.getFinishedBucketCount(), 2);
iter.update(bp4.getSuperbucket(), new BucketId(distBits + 2, bp4.getSuperbucket().getId()));
assertEquals(progress.percentFinished(), 18.75, epsilon);
assertEquals(progress.getFinishedBucketCount(), 2);
iter.update(bp4.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
iter.update(bp3.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertEquals(progress.percentFinished(), 25, epsilon);
assertEquals(progress.getFinishedBucketCount(), 4);
// Finish the remaining buckets; the estimate must converge to exactly 100%
while (iter.hasNext()) {
bp = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
assertEquals(progress.getFinishedBucketCount(), 16);
assertEquals(progress.percentFinished(), 100, epsilon);
}
@Test
public void testBucketKeyWrapperOrdering() {
    // BucketKeyWrapper must order keys as unsigned 64-bit values, so
    // 0x8000... sorts after 0x7FFF... even though it is negative as a
    // signed long.
    long[] ascending = {
            0x0000000000000001L,
            0x7FFFFFFFFFFFFFFFL,
            0x8000000000000000L,
            0xFFFFFFFFFFFFFFFFL,
    };
    for (int i = 0; i + 1 < ascending.length; ++i) {
        ProgressToken.BucketKeyWrapper lower = new ProgressToken.BucketKeyWrapper(ascending[i]);
        ProgressToken.BucketKeyWrapper higher = new ProgressToken.BucketKeyWrapper(ascending[i + 1]);
        assertTrue(lower.compareTo(higher) < 0);
        assertTrue(higher.compareTo(lower) > 0);
    }
    // Wrappers around equal keys must compare as equal.
    assertTrue(new ProgressToken.BucketKeyWrapper(0x7FFFFFFFFFFFFFFFL)
            .compareTo(new ProgressToken.BucketKeyWrapper(0x7FFFFFFFFFFFFFFFL)) == 0);
    assertTrue(new ProgressToken.BucketKeyWrapper(0x8000000000000000L)
            .compareTo(new ProgressToken.BucketKeyWrapper(0x8000000000000000L)) == 0);
}
/**
 * Verifies that ProgressToken.makeNthBucketKey(n, db) produces exactly the
 * n'th smallest bucket key for the given distribution bit count, by comparing
 * against keys generated through bucketToKey() and sorted.
 *
 * @param db distribution bit count to verify (key space of 2^db buckets)
 */
private void doTestBucketKeyGeneration(int db) {
    ProgressToken.BucketKeyWrapper[] keys = new ProgressToken.BucketKeyWrapper[1 << db];
    for (int i = 0; i < (1 << db); ++i) {
        keys[i] = new ProgressToken.BucketKeyWrapper(
                ProgressToken.bucketToKey(new BucketId(db, i).getId()));
    }
    Arrays.sort(keys);
    // Assert per index rather than accumulating a boolean flag, so a failure
    // pinpoints the first inconsistent key instead of just reporting "false".
    for (int i = 0; i < (1 << db); ++i) {
        assertEquals(keys[i].getKey(), ProgressToken.makeNthBucketKey(i, db));
    }
}
@Test
public void testBucketKeyGeneration() {
    // Exercise key generation across a range of distribution bit counts.
    for (int distBits = 1; distBits < 14; ++distBits) {
        doTestBucketKeyGeneration(distBits);
    }
}
/**
 * Tests splitPendingBucket(): splitting a pending superbucket replaces it with
 * its two children (one used-bit deeper), both of which inherit the sub-bucket
 * progress recorded for the parent.
 */
@Test
public void testSingleBucketSplits() throws ParseException {
int db = 2;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.BucketProgress bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db, 0));
// Put the bucket back in pending with no progress, then split it
iter.update(bp.getSuperbucket(), new BucketId());
assertEquals(p.getPendingBucketCount(), 1);
p.splitPendingBucket(new BucketId(db, 0));
assertEquals(p.getPendingBucketCount(), 2);
// Both children are returned, each with the parent's (empty) progress
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 0));
assertEquals(bp.getProgress(), new BucketId(0));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 4));
assertEquals(bp.getProgress(), new BucketId(0));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db, 2));
// Split a bucket that has real sub-bucket progress; both children keep it
iter.update(bp.getSuperbucket(), new BucketId(db + 3, 0x12));
assertEquals(p.getPendingBucketCount(), 1);
p.splitPendingBucket(new BucketId(db, 2));
assertEquals(p.getPendingBucketCount(), 2);
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 2));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x12));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 6));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x12));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db, 1));
iter.update(bp.getSuperbucket(), new BucketId(db + 3, 0x15));
assertEquals(p.getPendingBucketCount(), 1);
p.splitPendingBucket(new BucketId(db, 1));
assertEquals(p.getPendingBucketCount(), 2);
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 1));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x15));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 5));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x15));
}
/**
 * Test increasing the distribution bits for a full bucket space range
 * source with no finished, active or pending buckets
 * @throws ParseException upon docsel parse failure (shouldn't happen)
 */
@Test
public void testRangeDistributionBitIncrease1NoPending() throws ParseException {
int db = 2;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
assertEquals(p.getTotalBucketCount(), 4);
// Going from 2 to 3 distribution bits doubles the bucket space
iter.setDistributionBitCount(db + 1);
assertEquals(p.getTotalBucketCount(), 8);
assertEquals(p.getDistributionBitCount(), db + 1);
assertEquals(iter.getDistributionBitCount(), db + 1);
assertEquals(iter.getBucketSource().getDistributionBitCount(), db + 1);
// Buckets must be returned in reversed-bit (key) order for 3 bits:
// 000, 100, 010, 110, 001, 101, 011, 111
int[] desired = new int[] { 0, 4, 2, 6, 1, 5, 3, 7 };
for (int i = 0; i < 8; ++i) {
VisitorIterator.BucketProgress bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, desired[i]));
}
}
/**
 * Tests a single distribution bit increase when finished and pending buckets
 * exist: all counts double (each bucket is split in two), and the split
 * pending buckets are returned before the source resumes fresh buckets.
 */
@Test
public void testRangeDistributionBitIncrease1AllBucketStates() throws ParseException {
int db = 3;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.BucketProgress bp = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
// Put three buckets back in pending with empty progress
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[3];
bpp[0] = iter.getNext();
bpp[1] = iter.getNext();
bpp[2] = iter.getNext();
iter.update(bpp[0].getSuperbucket(), new BucketId());
iter.update(bpp[1].getSuperbucket(), new BucketId());
iter.update(bpp[2].getSuperbucket(), new BucketId());
assertEquals(p.getFinishedBucketCount(), 1);
assertEquals(p.getPendingBucketCount(), 3);
assertEquals(p.getActiveBucketCount(), 0);
// Increasing db by one doubles both finished and pending counts
iter.setDistributionBitCount(db + 1);
assertEquals(p.getTotalBucketCount(), 16);
assertEquals(p.getFinishedBucketCount(), 2);
assertEquals(p.getPendingBucketCount(), 6);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getDistributionBitCount(), db + 1);
assertEquals(iter.getDistributionBitCount(), db + 1);
assertEquals(iter.getBucketSource().getDistributionBitCount(), db + 1);
// The six split pending buckets are drained first...
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x04));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0C));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x02));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0A));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x06));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0E));
assertEquals(p.getPendingBucketCount(), 0);
// ...then the range source continues with fresh buckets
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x01));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x09));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x05));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0D));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x03));
}
/**
 * Tests increasing the distribution bit count by several bits (16 -> 20) while
 * a bucket is still active: the token keeps the old bit count and the source
 * yields until the active bucket is returned, after which all state is
 * converted to the new bit count (each old bucket becomes 2^4 new ones).
 */
@Test
public void testRangeDistributionIncreaseMultipleBits() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
for (int i = 0; i < 3; ++i) {
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
// Two buckets pending, one left active across the bit change
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[2];
bpp[0] = iter.getNext();
bpp[1] = iter.getNext();
VisitorIterator.BucketProgress bpa = iter.getNext();
iter.update(bpp[0].getSuperbucket(), new BucketId());
iter.update(bpp[1].getSuperbucket(), new BucketId());
iter.setDistributionBitCount(20);
// Token lags at 16 bits while a bucket is still active; source must yield
assertEquals(p.getDistributionBitCount(), 16);
assertEquals(iter.getDistributionBitCount(), 20);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 20);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertTrue(iter.getBucketSource().shouldYield());
assertEquals(p.getPendingBucketCount(), 2);
assertEquals(p.getActiveBucketCount(), 1);
// Returning the active bucket triggers conversion: every old bucket
// expands into 2^(20-16) = 16 new ones
iter.update(bpa.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertEquals(p.getDistributionBitCount(), 20);
assertEquals(p.getPendingBucketCount(), 32);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getBucketCursor(), 96);
assertEquals(p.getFinishedBucketCount(), 16 * 4);
// The 32 pending buckets must come out in key order from position 48
for (int i = 0; i < 32; ++i) {
long testKey = ProgressToken.makeNthBucketKey(i + 48, 20);
VisitorIterator.BucketProgress bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(ProgressToken.keyToBucketId(testKey)));
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 16 * 6);
assertEquals(iter.getNext().getSuperbucket(), new BucketId(20, 0x6000));
}
@Test
public void testSingleBucketMerge() throws ParseException {
    final int db = 2;
    BucketIdFactory factory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", factory, db, progress);

    // Put the first superbucket back into pending, then split it in two.
    VisitorIterator.BucketProgress fetched = iterator.getNext();
    iterator.update(fetched.getSuperbucket(), new BucketId());
    progress.splitPendingBucket(new BucketId(db, 0));
    assertEquals(progress.getPendingBucketCount(), 2);

    // Merging one child rejoins the siblings into the original bucket,
    // which is then handed out again by getNext().
    progress.mergePendingBucket(new BucketId(db + 1, 0));
    assertEquals(progress.getPendingBucketCount(), 1);
    fetched = iterator.getNext();
    assertEquals(fetched.getSuperbucket(), new BucketId(db, 0));
}
/**
 * Tests decreasing the distribution bit count by one (16 -> 15) with finished,
 * pending and active buckets present: the source yields until the active
 * bucket is returned, then all counts and the cursor are halved.
 */
@Test
public void testRangeDistributionBitDecrease1() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
// Lossless reset is only possible while nothing has been finished yet
assertTrue(src.isLosslessResetPossible());
for (int i = 0; i < 3; ++i) {
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
assertFalse(src.isLosslessResetPossible());
// Six buckets pending, one active when the bit count drops
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[6];
for (int i = 0; i < 6; ++i) {
bpp[i] = iter.getNext();
}
VisitorIterator.BucketProgress bpa = iter.getNext();
for (int i = 0; i < 6; ++i) {
iter.update(bpp[i].getSuperbucket(), new BucketId());
}
assertEquals(p.getBucketCursor(), 10);
iter.setDistributionBitCount(db - 1);
// Token keeps the old bit count until the active bucket comes back
assertEquals(iter.getDistributionBitCount(), db - 1);
assertEquals(p.getDistributionBitCount(), db);
assertEquals(iter.getBucketSource().getDistributionBitCount(), db - 1);
assertTrue(iter.getBucketSource().shouldYield());
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getActiveBucketCount(), 1);
iter.update(bpa.getSuperbucket(), new BucketId());
// After conversion, counts and cursor are halved (with rounding)
assertEquals(p.getDistributionBitCount(), db - 1);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getPendingBucketCount(), 4);
assertEquals(p.getFinishedBucketCount(), 1);
assertEquals(p.getBucketCursor(), 5);
}
/**
 * Tests that increasing the distribution bit count (16 -> 20) and then
 * decreasing it back (20 -> 16) restores the original pending count and
 * bucket cursor, i.e. the two conversions are inverses of each other.
 */
@Test
public void testRangeDistributionBitIncreaseDecrease() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
assertTrue(src.isLosslessResetPossible());
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
// Leave 4 buckets pending before changing the bit count
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[4];
for (int i = 0; i < 4; ++i) {
bpp[i] = iter.getNext();
}
for (int i = 0; i < 4; ++i) {
iter.update(bpp[i].getSuperbucket(), new BucketId());
}
assertFalse(src.isLosslessResetPossible());
// 16 -> 20 bits: pending count and cursor scale up by 2^4
iter.setDistributionBitCount(20);
assertEquals(p.getDistributionBitCount(), 20);
assertEquals(p.getPendingBucketCount(), 4 << 4);
assertFalse(iter.getBucketSource().shouldYield());
assertEquals(p.getBucketCursor(), 5 << 4);
// 20 -> 16 bits: scales back down to the original values
iter.setDistributionBitCount(16);
assertEquals(p.getDistributionBitCount(), 16);
assertEquals(p.getPendingBucketCount(), 4);
assertFalse(iter.getBucketSource().shouldYield());
assertEquals(p.getBucketCursor(), 5);
}
/**
 * Tests distribution bit changes issued while active buckets are still
 * outstanding: the source yields until all active buckets have been returned,
 * a second bit change may override the first before conversion happens, and a
 * progress token serialized mid-change can be imported and converted later.
 */
@Test
public void testRangeDistributionBitChangeWithoutDone() throws ParseException {
int db = 11;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[4];
for (int i = 0; i < 4; ++i) {
bpp[i] = iter.getNext();
}
// Only 2 of the 4 fetched buckets are returned; 2 remain active
for (int i = 0; i < 2; ++i) {
iter.update(bpp[i].getSuperbucket(), new BucketId());
}
assertFalse(src.isLosslessResetPossible());
// Request a decrease while 2 buckets are active: nothing converts yet
iter.setDistributionBitCount(9);
assertEquals(p.getDistributionBitCount(), 11);
assertEquals(p.getActiveBucketCount(), 2);
assertEquals(p.getPendingBucketCount(), 2);
assertTrue(iter.getBucketSource().shouldYield());
iter.update(bpp[3].getSuperbucket(), new BucketId(15, bpp[3].getSuperbucket().getId()));
// Override the pending change with an increase before conversion
iter.setDistributionBitCount(12);
assertEquals(p.getActiveBucketCount(), 1);
assertEquals(p.getPendingBucketCount(), 3);
assertTrue(iter.getBucketSource().shouldYield());
// Snapshot the token while still at 11 bits with the change outstanding
String serialized = p.toString();
iter.update(bpp[2].getSuperbucket(), ProgressToken.FINISHED_BUCKET);
// Last active bucket returned: conversion to 12 bits happens now
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getPendingBucketCount(), 3 * 2);
assertFalse(iter.getBucketSource().shouldYield());
assertEquals(p.getFinishedBucketCount(), 2);
// Re-import the mid-change snapshot and convert it to 12 bits
ProgressToken p2 = new ProgressToken(serialized);
assertEquals(p2.getDistributionBitCount(), 11);
BucketIdFactory idFactory2 = new BucketIdFactory();
VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory2, 1, p2);
assertEquals(iter2.getDistributionBitCount(), 11);
assertEquals(p2.getDistributionBitCount(), 11);
iter2.setDistributionBitCount(12);
assertEquals(p2.getDistributionBitCount(), 12);
assertEquals(p2.getPendingBucketCount(), 8);
assertEquals(p2.getBucketCursor(), 8);
assertEquals(p2.getFinishedBucketCount(), 0);
}
/**
 * Tests a large distribution bit decrease (31 -> 11) issued right at the start
 * of visiting, with one bucket still active: once the active bucket is
 * returned, all progress is reset and iteration restarts from the first
 * bucket of the new, smaller bucket space.
 */
@Test
public void testRangeDistributionBitInitialDrop() throws ParseException {
int db = 31;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.BucketProgress[] bp = new VisitorIterator.BucketProgress[3];
bp[0] = iter.getNext();
bp[1] = iter.getNext();
bp[2] = iter.getNext();
iter.update(bp[2].getSuperbucket(), new BucketId());
iter.update(bp[1].getSuperbucket(), new BucketId());
assertEquals(p.getActiveBucketCount(), 1);
// The drop cannot take effect while a bucket is still active
iter.setDistributionBitCount(11);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getActiveBucketCount(), 1);
iter.update(new BucketId(31, 0), new BucketId());
// Active bucket returned: progress resets to a clean 11-bit state
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getBucketCursor(), 0);
bp[0] = iter.getNext();
assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
}
/**
 * Tests the "lossless reset" path when changing distribution bits (1 -> 11)
 * before any bucket has been finished: after the outstanding active bucket is
 * returned, all progress is zeroed and iteration restarts at 11 bits. Also
 * verifies the same reset happens for a token serialized before the change.
 */
@Test
public void testRangeDistributionLosslessReset() throws ParseException {
int db = 1;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
VisitorIterator.BucketProgress[] bp = new VisitorIterator.BucketProgress[2];
bp[0] = iter.getNext();
bp[1] = iter.getNext();
// Snapshot while both buckets are active (before any update)
String serialized = p.toString();
assertFalse(src.isLosslessResetPossible());
iter.update(bp[1].getSuperbucket(), new BucketId());
assertEquals(p.getActiveBucketCount(), 1);
iter.setDistributionBitCount(11);
// One bucket still active: the change is deferred and the token
// remains at 1 distribution bit
assertFalse(src.isLosslessResetPossible());
assertEquals(p.getDistributionBitCount(), 1);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getActiveBucketCount(), 1);
iter.update(new BucketId(1, 0), new BucketId());
// Last active bucket returned: lossless reset to a clean 11-bit state
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getBucketCursor(), 0);
assertEquals(p.getDistributionBitCount(), 11);
bp[0] = iter.getNext();
assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
// Importing the pre-change snapshot and raising its bit count must
// also produce a clean reset
p = new ProgressToken(serialized);
idFactory = new BucketIdFactory();
iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 1, p);
iter.setDistributionBitCount(11);
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getBucketCursor(), 0);
assertEquals(p.getDistributionBitCount(), 11);
bp[0] = iter.getNext();
assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
}
@Test
public void testExplicitDistributionBitIncrease() throws ParseException {
    final int initialDb = 12;
    BucketIdFactory factory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    // An explicit id.user selection gives exactly 3 buckets to visit.
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.user == 1234 or id.user == 6789 or id.user == 8009", factory, initialDb, progress);
    assertEquals(iterator.getDistributionBitCount(), initialDb);
    assertEquals(progress.getDistributionBitCount(), initialDb);
    assertEquals(iterator.getBucketSource().getDistributionBitCount(), initialDb);

    iterator.update(iterator.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    // Raising the distribution bit count must propagate to iterator, token
    // and source without changing the explicit bucket counts.
    iterator.setDistributionBitCount(16);
    assertEquals(iterator.getDistributionBitCount(), 16);
    assertEquals(progress.getDistributionBitCount(), 16);
    assertEquals(iterator.getBucketSource().getDistributionBitCount(), 16);
    assertEquals(progress.getPendingBucketCount(), 2);
    assertEquals(progress.getFinishedBucketCount(), 1);
    assertEquals(progress.getTotalBucketCount(), 3);
}
/**
 * Tests lowering the distribution bit count (20 -> 16) on an explicit
 * (id.user) bucket source: the new count propagates to the iterator, token
 * and source, while the explicit bucket counts are left untouched.
 */
@Test
public void testExplicitDistributionBitDecrease() throws ParseException {
int distBits = 20;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
// The three id.user terms yield exactly 3 buckets to visit
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, p);
assertEquals(iter.getDistributionBitCount(), distBits);
assertEquals(p.getDistributionBitCount(), distBits);
assertEquals(iter.getBucketSource().getDistributionBitCount(), distBits);
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
iter.setDistributionBitCount(16);
assertEquals(iter.getDistributionBitCount(), 16);
assertEquals(p.getDistributionBitCount(), 16);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 16);
// Bucket counts are unchanged by the bit count decrease
assertEquals(p.getPendingBucketCount(), 2);
assertEquals(p.getFinishedBucketCount(), 1);
assertEquals(p.getTotalBucketCount(), 3);
}
/**
 * Tests that importing a serialized explicit-source progress token does not
 * truncate its distribution bit count to the (lower) one passed at creation:
 * the imported 20-bit state wins over the requested 1 bit.
 */
@Test
public void testExplicitDistributionImportNoTruncation() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, 20, p);
assertEquals(20, iter.getDistributionBitCount());
assertEquals(20, p.getDistributionBitCount());
assertEquals(20, iter.getBucketSource().getDistributionBitCount());
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
String serialized = p.toString();
ProgressToken p2 = new ProgressToken(serialized);
BucketIdFactory idFactory2 = new BucketIdFactory();
// Create with 1 distribution bit: the imported token's 20 bits must prevail
VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory2, 1, p2);
assertEquals(20, iter2.getDistributionBitCount());
assertEquals(20, p2.getDistributionBitCount());
assertEquals(20, iter2.getBucketSource().getDistributionBitCount());
assertEquals(2, p2.getPendingBucketCount());
assertEquals(1, p2.getFinishedBucketCount());
assertEquals(3, p2.getTotalBucketCount());
}
/**
 * Tests importing a progress file written with an older, lower distribution
 * bit count (10) and then raising the iterator to 12 bits: every counter and
 * pending bucket is scaled up by 2^2, and the converted state survives a
 * serialize/deserialize round trip.
 */
@Test
public void testImportProgressWithOutdatedDistribution() throws ParseException {
    String input = "VDS bucket progress file\n" +
            "10\n" +
            "503\n" +
            "500\n" +
            "1024\n" +
            "28000000000000be:0\n" +
            "28000000000002be:0\n" +
            "28000000000001be:0\n";
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken(input);
    assertEquals(10, p.getDistributionBitCount());
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 1, p);
    iter.setDistributionBitCount(12);
    assertEquals(iter.getDistributionBitCount(), 12);
    assertEquals(p.getDistributionBitCount(), 12);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), 12);
    // Going from 10 to 12 distribution bits splits every bucket into 4.
    assertEquals(p.getTotalBucketCount(), 1 << 12);
    assertEquals(p.getFinishedBucketCount(), 500 << 2);
    assertEquals(p.getPendingBucketCount(), 3 << 2);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 503 << 2);
    assertTrue(iter.hasNext());
    // The converted state must round-trip through serialization unchanged.
    ProgressToken p2 = new ProgressToken(p.serialize());
    assertEquals(p2.getDistributionBitCount(), 12);
    assertEquals(p2.getTotalBucketCount(), 1 << 12);
    assertEquals(p2.getFinishedBucketCount(), 500 << 2);
    assertEquals(p2.getPendingBucketCount(), 3 << 2);
    assertEquals(p2.getActiveBucketCount(), 0);
    assertEquals(p2.getBucketCursor(), 503 << 2);
}
/**
 * Tests importing a progress file whose single pending bucket has more used
 * bits than the file's distribution bit count (an "inconsistent" file):
 * creating the iterator normalizes the pending bucket to 2^3 sub-buckets at
 * 7 bits, and a subsequent increase to 8 bits doubles everything again.
 */
@Test
public void testImportInconsistentProgressIncrease() throws ParseException {
    String input = "VDS bucket progress file\n" +
            "7\n" +
            "32\n" +
            "24\n" +
            "128\n" +
            "100000000000000c:0\n";
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken(input);
    assertEquals(7, p.getDistributionBitCount());
    assertEquals(p.getTotalBucketCount(), 1 << 7);
    assertEquals(p.getFinishedBucketCount(), 24);
    assertEquals(p.getPendingBucketCount(), 1);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(32, p.getBucketCursor());
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 1, p);
    // Iterator creation normalizes the inconsistent pending bucket into
    // 2^3 buckets at the file's 7 distribution bits, adjusting the cursor.
    assertEquals(7, p.getDistributionBitCount());
    assertEquals(p.getPendingBucketCount(), 1 << 3);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(24 + (1 << 3), p.getBucketCursor());
    // Increasing 7 -> 8 bits doubles all counts and the cursor components.
    iter.setDistributionBitCount(8);
    assertEquals(iter.getDistributionBitCount(), 8);
    assertEquals(p.getDistributionBitCount(), 8);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), 8);
    assertEquals(p.getTotalBucketCount(), 1 << 8);
    assertEquals(p.getFinishedBucketCount(), 24 << 1);
    assertEquals(p.getPendingBucketCount(), 1 << 4);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 24*2 + (1 << 4));
    assertTrue(iter.hasNext());
}
/**
 * Tests importing the same inconsistent 7-bit progress file as
 * testImportInconsistentProgressIncrease, but then lowering the distribution
 * bit count to 6: all counts and the cursor are halved instead of doubled.
 */
@Test
public void testImportInconsistentProgressDecrease() throws ParseException {
String input = "VDS bucket progress file\n" +
"7\n" +
"32\n" +
"24\n" +
"128\n" +
"100000000000000c:0\n";
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken(input);
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 1, p);
assertEquals(iter.getDistributionBitCount(), 7);
// Decreasing 7 -> 6 bits halves finished count and cursor, and the
// normalized pending buckets shrink from 2^3 to 2^2
iter.setDistributionBitCount(6);
assertEquals(iter.getDistributionBitCount(), 6);
assertEquals(p.getDistributionBitCount(), 6);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 6);
assertEquals(p.getTotalBucketCount(), 1 << 6);
assertEquals(p.getFinishedBucketCount(), 24 >> 1);
assertEquals(p.getPendingBucketCount(), 1 << 2);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getBucketCursor(), 24/2 + (1 << 2));
assertTrue(iter.hasNext());
}
/**
 * Tests that after increasing the distribution bit count mid-visit (4 -> 7),
 * with some buckets only partially completed, iterating to exhaustion covers
 * every bucket of the new, larger bucket space exactly once.
 */
@Test
public void testEntireBucketSpaceCovered() throws ParseException {
    int db = 4;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, p);
    // Leave 3 buckets half-done (progress recorded at db + 1 used bits).
    VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[3];
    for (int i = 0; i < 3; ++i) {
        bpp[i] = iter.getNext();
    }
    for (int i = 0; i < 3; ++i) {
        iter.update(bpp[i].getSuperbucket(),
                new BucketId(db + 1, bpp[i].getSuperbucket().getId()));
    }
    // Build the set of all buckets expected in the new 7-bit space.
    int newDb = 7;
    Set<BucketId> buckets = new TreeSet<>();
    for (int i = 0; i < (1 << newDb); ++i) {
        buckets.add(new BucketId(newDb, i));
    }
    iter.setDistributionBitCount(newDb);
    assertEquals(p.getFinishedBucketCount(), 0);
    // Each of the 3 pending buckets expands into 2^3 buckets at 7 bits.
    assertEquals(p.getPendingBucketCount(), 3 << 3);
    // Every returned superbucket must be in the expected set, exactly once.
    while (iter.hasNext()) {
        VisitorIterator.BucketProgress bp = iter.getNext();
        assertTrue(buckets.contains(bp.getSuperbucket()));
        buckets.remove(bp.getSuperbucket());
    }
    assertTrue(buckets.isEmpty());
}
/**
 * Tests that creating an iterator from a document selection that does not
 * match the buckets stored in the progress token (explicit-user progress with
 * a range selection, and vice versa) throws IllegalArgumentException.
 */
@Test
public void testExceptionOnWrongDocumentSelection() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
// Explicit-bucket progress file paired with a range (group) selection
boolean caughtIt = false;
try {
ProgressToken p = new ProgressToken("VDS bucket progress file\n16\n3\n1\n3\n"
+ "8000000000001f49:0\n8000000000001a85:0\n");
VisitorIterator.createFromDocumentSelection("id.group != \"yahoo.com\"", idFactory, 16, p);
}
catch (IllegalArgumentException e) {
caughtIt = true;
}
assertTrue(caughtIt);
// Range progress file paired with an explicit (group/user) selection
caughtIt = false;
try {
ProgressToken p = new ProgressToken("VDS bucket progress file\n" +
"10\n" +
"503\n" +
"500\n" +
"1024\n" +
"28000000000000be:0\n" +
"28000000000002be:0\n" +
"28000000000001be:0\n");
VisitorIterator.createFromDocumentSelection("id.group=\"yahoo.com\" or id.user=555", idFactory, 16, p);
}
catch (IllegalArgumentException e) {
caughtIt = true;
}
assertTrue(caughtIt);
}
@Test
public void testIsBucketFinished() throws ParseException {
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 4, progress);
    // No bucket is finished before any superbucket has completed.
    assertFalse(progress.isBucketFinished(new BucketId(32, 0)));
    // Finish the first superbucket (superbucket 0 at 4 distribution bits).
    iterator.update(iterator.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertTrue(progress.isBucketFinished(new BucketId(32, 0)));
    // A bucket belonging to a different, unfinished superbucket is not finished.
    assertFalse(progress.isBucketFinished(new BucketId(32, 1 << 3)));
    VisitorIterator.BucketProgress secondBucket = iterator.getNext();
    // Merely fetching the superbucket does not mark it finished.
    assertFalse(progress.isBucketFinished(new BucketId(32, 1 << 3)));
    iterator.update(secondBucket.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertTrue(progress.isBucketFinished(new BucketId(32, 1 << 3)));
    // Finished-ness is decided by the reversed distribution bits of the bucket,
    // so only buckets whose low bits map to a finished superbucket count.
    assertTrue(progress.isBucketFinished(new BucketId(32, 0x12345670)));
    assertTrue(progress.isBucketFinished(new BucketId(32, 0x12345678)));
    assertFalse(progress.isBucketFinished(new BucketId(32, 0x12345671)));
    assertFalse(progress.isBucketFinished(new BucketId(32, 0x12345679)));
}
@Test
public void testInconsistentState() throws ParseException {
    // The progress token is in an inconsistent state when its distribution bit
    // count is changed while buckets with a differing bit count remain active.
    int distributionBits = 16;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, distributionBits, progress);
    // Finish three superbuckets up front.
    for (int i = 0; i < 3; ++i) {
        iterator.update(iterator.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    }
    // Take out three active buckets; return two of them with zero progress
    // so that one remains active across the distribution bit change.
    VisitorIterator.BucketProgress[] returned = new VisitorIterator.BucketProgress[2];
    returned[0] = iterator.getNext();
    returned[1] = iterator.getNext();
    VisitorIterator.BucketProgress stillActive = iterator.getNext();
    iterator.update(returned[0].getSuperbucket(), new BucketId());
    iterator.update(returned[1].getSuperbucket(), new BucketId());
    assertFalse(progress.isInconsistentState());
    // Raising the bit count while a bucket is still active makes the state
    // inconsistent until that bucket is accounted for.
    iterator.setDistributionBitCount(20);
    assertTrue(progress.isInconsistentState());
    iterator.update(stillActive.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertFalse(progress.isInconsistentState());
}
@Test
public void testMalformedProgressFile() {
    // A bucket entry missing its progress value after the colon must be
    // rejected with IllegalArgumentException.
    boolean threw = false;
    try {
        new ProgressToken(
                "VDS bucket progress file\n"
                + "10\n"
                + "503\n"
                + "500\n"
                + "1024\n"
                + "28000000000000be:0\n"
                + "28000000000002be:");
    } catch (IllegalArgumentException expected) {
        threw = true;
    }
    assertTrue(threw);
}
@Test
public void testFailOnTooFewLinesInFile() {
    // A progress file truncated before all header fields are present must be
    // rejected with IllegalArgumentException.
    boolean threw = false;
    try {
        new ProgressToken(
                "VDS bucket progress file\n"
                + "10\n"
                + "503\n");
    } catch (IllegalArgumentException expected) {
        threw = true;
    }
    assertTrue(threw);
}
@Test
public void testUnknownFirstHeaderLine() {
    // An unrecognized magic header line must be rejected with
    // IllegalArgumentException, even if the rest of the file is well-formed.
    boolean threw = false;
    try {
        new ProgressToken(
                "Smurf Time 3000\n"
                + "10\n"
                + "503\n"
                + "500\n"
                + "1024\n"
                + "28000000000000be:0\n"
                + "28000000000002be:0");
    } catch (IllegalArgumentException expected) {
        threw = true;
    }
    assertTrue(threw);
}
@Test
public void testBinaryProgressSerialization() {
    // Round-trip a textual progress token through its binary serialization
    // and verify the textual form is reproduced exactly.
    String textualForm =
            "VDS bucket progress file (48.828125% completed)\n"
            + "10\n"
            + "503\n"
            + "500\n"
            + "1024\n"
            + "28000000000000be:0\n"
            + "28000000000002be:0\n"
            + "28000000000001be:0\n";
    ProgressToken original = new ProgressToken(textualForm);
    byte[] serialized = original.serialize();
    ProgressToken roundTripped = new ProgressToken(serialized);
    assertEquals(textualForm, roundTripped.toString());
}
} | |
Agree, these tests should be rewritten to use DeploymentTester. | public void includes_application_endpoint_when_declared() {
Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"));
ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
.region(zone2.region())
.applicationEndpoint("a", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 2,
InstanceName.from("main"), 8))
.applicationEndpoint("b", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 1,
InstanceName.from("main"), 1))
.applicationEndpoint("c", "qrs", zone1.region().value(),
Map.of(InstanceName.from("beta"), 4,
InstanceName.from("main"), 6))
.build();
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
"a1.t1.g.vespa-app.cloud",
"*.a1.t1.g.vespa-app.cloud",
"a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"a1.t1.aws-us-east-1c.test.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.test.z.vespa-app.cloud",
"a1.t1.aws-us-east-1c.staging.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
} | List<String> expectedSans = List.of( | public void includes_application_endpoint_when_declared() {
Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"));
ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
.region(zone2.region())
.applicationEndpoint("a", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 2,
InstanceName.from("main"), 8))
.applicationEndpoint("b", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 1,
InstanceName.from("main"), 1))
.applicationEndpoint("c", "qrs", zone1.region().value(),
Map.of(InstanceName.from("beta"), 4,
InstanceName.from("main"), 6))
.build();
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
"a1.t1.g.vespa-app.cloud",
"*.a1.t1.g.vespa-app.cloud",
"a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"a1.t1.aws-us-east-1c.test.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.test.z.vespa-app.cloud",
"a1.t1.aws-us-east-1c.staging.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
} | class EndpointCertificatesTest {
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock();
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
private static final List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud",
"default.default.us-east-1.test.vespa.oath.cloud",
"*.default.default.us-east-1.test.vespa.oath.cloud",
"default.default.us-east-3.staging.vespa.oath.cloud",
"*.default.default.us-east-3.staging.vespa.oath.cloud"
);
private static final List<String> expectedAdditionalSans = List.of(
"default.default.ap-northeast-1.vespa.oath.cloud",
"*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
private static final List<String> expectedDevSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.us-east-1.dev.vespa.oath.cloud",
"*.default.default.us-east-1.dev.vespa.oath.cloud"
);
private X509Certificate makeTestCert(List<String> sans) {
X509CertificateBuilder x509CertificateBuilder = X509CertificateBuilder
.fromKeypair(
testKeyPair,
new X500Principal("CN=test"),
clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
SignatureAlgorithm.SHA256_WITH_ECDSA,
X509CertificateBuilder.generateRandomSerialNumber());
for (String san : sans) x509CertificateBuilder = x509CertificateBuilder.addSubjectAlternativeName(san);
return x509CertificateBuilder.build();
}
private final Instance testInstance = new Instance(ApplicationId.defaultId());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
private ZoneId testZone;
@Before
public void setUp() {
tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
clock.setInstant(Instant.EPOCH);
testCertificate = makeTestCert(expectedSans);
testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
public void provisions_new_certificate_in_dev() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedDevSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_prod() {
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_public_prod() {
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
"default.default.g.vespa-app.cloud",
"*.default.default.g.vespa-app.cloud",
"default.default.aws-us-east-1a.z.vespa-app.cloud",
"*.default.default.aws-us-east-1a.z.vespa-app.cloud",
"default.default.aws-us-east-1c.test.z.vespa-app.cloud",
"*.default.default.aws-us-east-1c.test.z.vespa-app.cloud",
"default.default.aws-us-east-1c.staging.z.vespa-app.cloud",
"*.default.default.aws-us-east-1c.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void reuses_stored_certificate_metadata() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id",
List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud"),
"", Optional.empty(), Optional.empty()));
secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(testKeyName, endpointCertificateMetadata.get().keyName());
assertEquals(testCertName, endpointCertificateMetadata.get().certName());
assertEquals(7, endpointCertificateMetadata.get().version());
}
@Test
public void reprovisions_certificate_when_necessary() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "uuid", List.of(), "issuer", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
@Test
public void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().skip(1).findFirst().orElseThrow().getId();
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", expectedSans, "mockCa", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
assertEquals("original-request-uuid", endpointCertificateMetadata.get().requestId());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
public void includes_zones_in_deployment_spec_when_deploying_to_staging() {
DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
"<deployment version=\"1.0\">\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">aws-us-east-1a</region>\n" +
" <region active=\"true\">ap-northeast-1</region>\n" +
" </prod>\n" +
" </instance>\n" +
"</deployment>\n");
ZoneId testZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, deploymentSpec);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
} | class EndpointCertificatesTest {
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock();
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
private static final List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud",
"default.default.us-east-1.test.vespa.oath.cloud",
"*.default.default.us-east-1.test.vespa.oath.cloud",
"default.default.us-east-3.staging.vespa.oath.cloud",
"*.default.default.us-east-3.staging.vespa.oath.cloud"
);
private static final List<String> expectedAdditionalSans = List.of(
"default.default.ap-northeast-1.vespa.oath.cloud",
"*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
private static final List<String> expectedDevSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.us-east-1.dev.vespa.oath.cloud",
"*.default.default.us-east-1.dev.vespa.oath.cloud"
);
private X509Certificate makeTestCert(List<String> sans) {
X509CertificateBuilder x509CertificateBuilder = X509CertificateBuilder
.fromKeypair(
testKeyPair,
new X500Principal("CN=test"),
clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
SignatureAlgorithm.SHA256_WITH_ECDSA,
X509CertificateBuilder.generateRandomSerialNumber());
for (String san : sans) x509CertificateBuilder = x509CertificateBuilder.addSubjectAlternativeName(san);
return x509CertificateBuilder.build();
}
private final Instance testInstance = new Instance(ApplicationId.defaultId());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
private ZoneId testZone;
@Before
public void setUp() {
tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
clock.setInstant(Instant.EPOCH);
testCertificate = makeTestCert(expectedSans);
testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
public void provisions_new_certificate_in_dev() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedDevSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_prod() {
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_public_prod() {
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
"default.default.g.vespa-app.cloud",
"*.default.default.g.vespa-app.cloud",
"default.default.aws-us-east-1a.z.vespa-app.cloud",
"*.default.default.aws-us-east-1a.z.vespa-app.cloud",
"default.default.aws-us-east-1c.test.z.vespa-app.cloud",
"*.default.default.aws-us-east-1c.test.z.vespa-app.cloud",
"default.default.aws-us-east-1c.staging.z.vespa-app.cloud",
"*.default.default.aws-us-east-1c.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void reuses_stored_certificate_metadata() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id",
List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud"),
"", Optional.empty(), Optional.empty()));
secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(testKeyName, endpointCertificateMetadata.get().keyName());
assertEquals(testCertName, endpointCertificateMetadata.get().certName());
assertEquals(7, endpointCertificateMetadata.get().version());
}
@Test
public void reprovisions_certificate_when_necessary() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "uuid", List.of(), "issuer", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
@Test
public void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().skip(1).findFirst().orElseThrow().getId();
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", expectedSans, "mockCa", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
assertEquals("original-request-uuid", endpointCertificateMetadata.get().requestId());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
public void includes_zones_in_deployment_spec_when_deploying_to_staging() {
DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
"<deployment version=\"1.0\">\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">aws-us-east-1a</region>\n" +
" <region active=\"true\">ap-northeast-1</region>\n" +
" </prod>\n" +
" </instance>\n" +
"</deployment>\n");
ZoneId testZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, deploymentSpec);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
} |
```suggestion // Handling wrong distribution reply, but inconsistent state due to the other, active bucket. ``` | public void testIgnoredSlicing() throws ParseException {
int distBits = 1;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, distBits, progress, 3, 2);
assertTrue(iter.hasNext());
VisitorIterator.BucketProgress first = iter.getNext();
assertEquals(ProgressToken.toBucketId(0, 1), first.getSuperbucket());
VisitorIterator.BucketProgress second = iter.getNext();
assertEquals(ProgressToken.toBucketId(1, 1), second.getSuperbucket());
assertFalse(iter.hasNext());
iter.update(first.getSuperbucket(), first.getProgress());
iter.setDistributionBitCount(2);
assertEquals(2, iter.getDistributionBitCount());
assertEquals(1, progress.getDistributionBitCount());
iter.update(second.getSuperbucket(), second.getProgress());
assertEquals(2, iter.getDistributionBitCount());
assertEquals(2, progress.getDistributionBitCount());
assertTrue(iter.hasNext());
assertEquals(ProgressToken.toBucketId(2, 2), iter.getNext().getSuperbucket());
assertFalse(iter.hasNext());
} | public void testIgnoredSlicing() throws ParseException {
int distBits = 1;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, distBits, progress, 3, 2);
assertTrue(iter.hasNext());
VisitorIterator.BucketProgress first = iter.getNext();
assertEquals(ProgressToken.toBucketId(0, 1), first.getSuperbucket());
VisitorIterator.BucketProgress second = iter.getNext();
assertEquals(ProgressToken.toBucketId(1, 1), second.getSuperbucket());
assertFalse(iter.hasNext());
iter.update(first.getSuperbucket(), first.getProgress());
iter.setDistributionBitCount(2);
assertEquals(2, iter.getDistributionBitCount());
assertEquals(1, progress.getDistributionBitCount());
iter.update(second.getSuperbucket(), second.getProgress());
assertEquals(2, iter.getDistributionBitCount());
assertEquals(2, progress.getDistributionBitCount());
assertTrue(iter.hasNext());
assertEquals(ProgressToken.toBucketId(2, 2), iter.getNext().getSuperbucket());
assertFalse(iter.hasNext());
} | class VisitorIteratorTestCase {
/**
 * Iterating a single-bucket (id.user) selection: the bucket moves between
 * pending and active as it is fetched and updated, resumes from recorded
 * sub-bucket progress, and completes once marked finished.
 */
@Test
public void testIterationSingleBucketUpdate() throws ParseException {
    BucketIdFactory factory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.user = 1234", factory, 1, token);

    // A user-id selection maps to exactly one superbucket, initially pending.
    assertFalse(token.hasActive());
    assertEquals(token.getPendingBucketCount(), 1);
    assertEquals(token.getFinishedBucketCount(), 0);
    assertEquals(token.getTotalBucketCount(), 1);
    assertFalse(iterator.isDone());
    assertTrue(iterator.hasNext());
    assertEquals(iterator.getRemainingBucketCount(), 1);

    // Fetching the bucket moves it from pending to active.
    VisitorIterator.BucketProgress fetched = iterator.getNext();
    assertEquals(fetched.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(fetched.getProgress(), new BucketId());
    assertFalse(iterator.hasNext());
    assertFalse(iterator.isDone());
    assertEquals(iterator.getRemainingBucketCount(), 1);
    assertEquals(token.getActiveBucketCount(), 1);
    assertFalse(token.hasPending());

    // Reporting partial (sub-bucket) progress puts it back into pending.
    BucketId partial = new BucketId(fetched.getSuperbucket().getUsedBits() + 1,
            fetched.getSuperbucket().getId());
    iterator.update(fetched.getSuperbucket(), partial);
    assertFalse(token.hasActive());
    assertEquals(token.getPendingBucketCount(), 1);
    assertTrue(iterator.hasNext());
    assertFalse(iterator.isDone());
    assertEquals(iterator.getRemainingBucketCount(), 1);

    // Re-fetching resumes from the recorded sub-bucket position.
    VisitorIterator.BucketProgress refetched = iterator.getNext();
    assertEquals(refetched.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(refetched.getProgress(), new BucketId(33, 1234));
    assertFalse(iterator.hasNext());
    assertEquals(token.getActiveBucketCount(), 1);
    assertFalse(token.hasPending());

    // Marking the bucket finished completes the entire iteration.
    iterator.update(fetched.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertFalse(token.hasActive());
    assertFalse(token.hasPending());
    assertFalse(iterator.hasNext());
    assertTrue(iterator.isDone());
    assertTrue(token.isFinished());
    assertEquals(token.getFinishedBucketCount(), 1);
    assertEquals(iterator.getRemainingBucketCount(), 0);
}
/**
 * Invalid slicing parameters must be rejected with an
 * IllegalArgumentException carrying a descriptive message.
 *
 * Fix: the original try-blocks had no failure path when NO exception was
 * thrown, so a regression that stopped validating arguments would pass
 * silently. Each expected-to-throw call is now followed by an explicit
 * AssertionError (not caught by the IllegalArgumentException handler).
 */
@Test
public void testInvalidSlicing() throws ParseException {
    int distBits = 4;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    // slices must be strictly positive.
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 0, 0);
        throw new AssertionError("Expected IllegalArgumentException for slices == 0");
    }
    catch (IllegalArgumentException e) {
        assertEquals("slices must be positive, but was 0", e.getMessage());
    }
    // sliceId must be strictly less than slices.
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 1, 1);
        throw new AssertionError("Expected IllegalArgumentException for sliceId == slices");
    }
    catch (IllegalArgumentException e) {
        assertEquals("sliceId must be in [0, 1), but was 1", e.getMessage());
    }
    // sliceId must be non-negative.
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 1, -1);
        throw new AssertionError("Expected IllegalArgumentException for negative sliceId");
    }
    catch (IllegalArgumentException e) {
        assertEquals("sliceId must be in [0, 1), but was -1", e.getMessage());
    }
}
/**
 * Valid slicing must partition the bucket space: for every slice count up
 * to twice the number of superbuckets, each slice visits a disjoint subset
 * and together the slices cover every bucket exactly once.
 *
 * Fix: the method carried a duplicated {@code @Test} annotation; since
 * {@code @Test} is not repeatable this is a compile error — removed.
 */
@Test
public void testValidSlicing() throws ParseException {
    int distBits = 4;
    long buckets = 1 << distBits;
    BucketIdFactory idFactory = new BucketIdFactory();
    for (int slices = 1; slices <= 2 * buckets; slices++) {
        long bucketsTotal = 0;
        for (int sliceId = 0; sliceId < slices; sliceId++) {
            ProgressToken progress = new ProgressToken();
            VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
                    "id.group != \"yahoo.com\"", idFactory, distBits, progress, slices, sliceId);
            String context = "slices: " + slices + ", sliceId: " + sliceId;
            assertEquals(context, progress.getDistributionBitCount(), distBits);
            assertTrue(context, iter.getBucketSource() instanceof VisitorIterator.DistributionRangeBucketSource);
            // The finished count starts out at sliceId (capped at the total).
            assertEquals(context, progress.getFinishedBucketCount(), Math.min(buckets, sliceId));
            assertEquals(context, progress.getTotalBucketCount(), buckets);
            // Visit until half of the bucket space counts as finished...
            long bucketCount = 0;
            while (iter.hasNext() && progress.getFinishedBucketCount() < buckets / 2) {
                VisitorIterator.BucketProgress ids = iter.getNext();
                iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
                ++bucketCount;
                ++bucketsTotal;
            }
            // ...and verify the intermediate state when this slice still has
            // buckets left at the halfway point.
            if (slices + sliceId < buckets) {
                assertEquals(context, ((buckets / 2) + slices - sliceId - 1) / slices, bucketCount);
                assertFalse(context, progress.hasActive());
                assertFalse(context, progress.hasPending());
                assertFalse(context, iter.isDone());
                assertTrue(context, iter.hasNext());
                assertEquals(context, progress.getFinishedBucketCount(), bucketCount * slices + sliceId);
                assertFalse(context, progress.isFinished());
            }
            // Drain the slice completely.
            while (iter.hasNext()) {
                VisitorIterator.BucketProgress ids = iter.getNext();
                iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
                ++bucketCount;
                ++bucketsTotal;
            }
            // Each slice visits its ceil'ed share of the buckets.
            assertEquals(context, (buckets + slices - sliceId - 1) / slices, bucketCount);
            assertFalse(context, progress.hasActive());
            assertFalse(context, progress.hasPending());
            assertTrue(context, iter.isDone());
            assertFalse(context, iter.hasNext());
            assertEquals(context, progress.getFinishedBucketCount(), buckets);
            assertTrue(context, progress.isFinished());
        }
        // Across all slices, every bucket is visited exactly once.
        assertEquals("slices: " + slices, buckets, bucketsTotal);
    }
}
/**
 * Serialization round-trip for a distribution-range bucket source:
 * verifies the textual progress-file format at 50% completion, that a
 * deserialized token resumes at the correct bucket, that in-flight
 * buckets serialize with their sub-bucket progress, and that the fully
 * finished token serializes as 100% complete.
 */
@Test
public void testProgressSerializationRange() throws ParseException {
int distBits = 4;
int buckets = 1 << distBits;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, distBits, progress);
assertEquals(progress.getDistributionBitCount(), distBits);
assertTrue(iter.getBucketSource() instanceof VisitorIterator.DistributionRangeBucketSource);
assertEquals(progress.getFinishedBucketCount(), 0);
assertEquals(progress.getTotalBucketCount(), buckets);
// Finish exactly half of the bucket space.
long bucketCount = 0;
long bucketStop = buckets / 2;
while (iter.hasNext() && bucketCount != bucketStop) {
VisitorIterator.BucketProgress ids = iter.getNext();
iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
++bucketCount;
}
assertEquals(bucketCount, bucketStop);
assertFalse(progress.hasActive());
assertFalse(progress.hasPending());
assertFalse(iter.isDone());
assertTrue(iter.hasNext());
assertEquals(progress.getFinishedBucketCount(), bucketCount);
assertFalse(progress.isFinished());
// Expected file format: header line with completion percentage, then
// distribution bits, bucket cursor, finished count and total count.
StringBuilder desired = new StringBuilder();
desired.append("VDS bucket progress file (50.0% completed)\n");
desired.append(distBits);
desired.append('\n');
desired.append(bucketCount);
desired.append('\n');
desired.append(bucketCount);
desired.append('\n');
desired.append(buckets);
desired.append('\n');
assertEquals(desired.toString(), progress.toString());
BucketIdFactory idFactory2 = new BucketIdFactory();
{
// Deserializing the 50%-done token must resume at the bucketCount'th
// bucket key. Note the iterator is created with 1 distribution bit,
// yet both end up at the token's 4 bits — the token wins.
ProgressToken progDs = new ProgressToken(progress.toString());
assertEquals(progDs.getDistributionBitCount(), distBits);
assertEquals(progDs.getTotalBucketCount(), buckets);
assertEquals(progDs.getFinishedBucketCount(), bucketCount);
VisitorIterator iterDs = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory2, 1, progDs);
assertFalse(progDs.hasPending());
assertFalse(progDs.hasActive());
assertTrue(iterDs.hasNext());
assertFalse(iterDs.isDone());
assertEquals(distBits, iterDs.getDistributionBitCount());
assertEquals(distBits, progDs.getDistributionBitCount());
VisitorIterator.BucketProgress idDs = iterDs.getNext();
long resumeKey = ProgressToken.makeNthBucketKey(bucketCount, distBits);
assertEquals(idDs.getSuperbucket(), new BucketId(ProgressToken.keyToBucketId(resumeKey)));
assertEquals(idDs.getProgress(), new BucketId());
}
// Fetch 4 more buckets; report partial sub-bucket progress on the first
// two and leave the other two active. Note: activeTotal == pendingTotal
// == 2 here, so the count assertions below hold even though the updated
// buckets are the ones that end up pending.
int pendingTotal = buckets / 8;
int activeTotal = buckets / 8;
Vector<VisitorIterator.BucketProgress> trackedBuckets = new Vector<VisitorIterator.BucketProgress>();
for (int i = 0; i < pendingTotal + activeTotal; ++i) {
trackedBuckets.add(iter.getNext());
}
for (int i = 0; i < pendingTotal + activeTotal; ++i) {
VisitorIterator.BucketProgress idTemp = trackedBuckets.get(i);
if (i < activeTotal) {
iter.update(idTemp.getSuperbucket(),
new BucketId(distBits + 2, idTemp.getSuperbucket().getId() | (2 * buckets)));
}
}
assertEquals(progress.getActiveBucketCount(), activeTotal);
assertEquals(progress.getPendingBucketCount(), pendingTotal);
// In-flight buckets serialize as "<bucket hex>:<progress hex>" lines
// after the header block.
desired = new StringBuilder();
desired.append("VDS bucket progress file (").append(progress.percentFinished()).append("% completed)\n");
desired.append(distBits);
desired.append('\n');
desired.append(bucketCount + pendingTotal + activeTotal);
desired.append('\n');
desired.append(bucketCount);
desired.append('\n');
desired.append(buckets);
desired.append('\n');
assertEquals(progress.getBuckets().entrySet().size(), pendingTotal + activeTotal);
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progress.getBuckets().entrySet()) {
desired.append(Long.toHexString(ProgressToken.keyToBucketId(entry.getKey().getKey())));
desired.append(':');
desired.append(Long.toHexString(entry.getValue().getProgress().getRawId()));
desired.append('\n');
}
assertEquals(progress.toString(), desired.toString());
{
// Deserializing with in-flight buckets: all of them come back as
// pending (active state is not preserved across serialization).
ProgressToken progDs = new ProgressToken(progress.toString());
assertEquals(progDs.getDistributionBitCount(), distBits);
assertEquals(progDs.getTotalBucketCount(), buckets);
assertEquals(progDs.getFinishedBucketCount(), bucketCount);
VisitorIterator iterDs = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory2, 1, progDs);
assertEquals(progDs.getPendingBucketCount(), pendingTotal + activeTotal);
assertEquals(distBits, progDs.getDistributionBitCount());
assertEquals(distBits, iterDs.getDistributionBitCount());
assertFalse(progDs.hasActive());
assertTrue(iterDs.hasNext());
assertFalse(iterDs.isDone());
assertEquals(progDs.getBucketCursor(), bucketCount + pendingTotal + activeTotal);
}
// Finish the remaining tracked buckets, then drain the iterator;
// hasNext() must remain consistent with isDone() while draining.
for (int i = activeTotal; i < activeTotal + pendingTotal; ++i) {
iter.update(trackedBuckets.get(i).getSuperbucket(), ProgressToken.FINISHED_BUCKET);
++bucketCount;
}
assertEquals(progress.getActiveBucketCount(), 0);
boolean consistentNext = true;
while (!iter.isDone()) {
if (!iter.hasNext()) {
consistentNext = false;
break;
}
VisitorIterator.BucketProgress bp = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
++bucketCount;
}
assertTrue(consistentNext);
assertFalse(iter.hasNext());
assertTrue(progress.isFinished());
assertEquals(bucketCount, buckets);
// A finished token serializes with cursor == finished == total.
StringBuilder finished = new StringBuilder();
finished.append("VDS bucket progress file (100.0% completed)\n");
finished.append(distBits);
finished.append('\n');
finished.append(buckets);
finished.append('\n');
finished.append(buckets);
finished.append('\n');
finished.append(buckets);
finished.append('\n');
assertEquals(progress.toString(), finished.toString());
}
/**
 * Serialization round-trip for an explicit (id.user == ...) bucket source
 * with three buckets: covers partially visited buckets, the resume order
 * after deserialization, and the fully finished representation.
 */
@Test
public void testProgressSerializationExplicit() throws ParseException {
int distBits = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, progress);
assertEquals(progress.getDistributionBitCount(), distBits);
assertTrue(iter.getBucketSource() instanceof VisitorIterator.ExplicitBucketSource);
assertEquals(progress.getFinishedBucketCount(), 0);
assertEquals(progress.getTotalBucketCount(), 3);
assertEquals(progress.getPendingBucketCount(), 3);
// Fetch two buckets and record partial progress on the first one.
VisitorIterator.BucketProgress bp1 = iter.getNext();
VisitorIterator.BucketProgress bp2 = iter.getNext();
assertEquals(progress.getPendingBucketCount(), 1);
assertEquals(progress.getActiveBucketCount(), 2);
assertEquals(bp1.getSuperbucket(), new BucketId(32, 1234));
assertEquals(bp1.getProgress(), new BucketId());
iter.update(bp1.getSuperbucket(), new BucketId(36, 1234));
assertEquals(progress.getPendingBucketCount(), 2);
assertEquals(bp2.getSuperbucket(), new BucketId(32, 8009));
assertEquals(bp2.getProgress(), new BucketId());
{
// Explicit sources serialize with cursor and finished count of 0;
// every bucket appears as a "<bucket hex>:<progress hex>" line.
StringBuilder desired = new StringBuilder();
desired.append("VDS bucket progress file (").append(progress.percentFinished()).append("% completed)\n");
desired.append(distBits);
desired.append('\n');
desired.append(0);
desired.append('\n');
desired.append(0);
desired.append('\n');
desired.append(3);
desired.append('\n');
desired.append(Long.toHexString(new BucketId(32, 1234).getRawId()));
desired.append(':');
desired.append(Long.toHexString(new BucketId(36, 1234).getRawId()));
desired.append('\n');
desired.append(Long.toHexString(new BucketId(32, 8009).getRawId()));
desired.append(":0\n");
desired.append(Long.toHexString(new BucketId(32, 6789).getRawId()));
desired.append(":0\n");
assertEquals(desired.toString(), progress.toString());
// Deserialize and re-iterate: all three buckets are pending again and
// come back in the serialized order, with recorded progress intact.
ProgressToken prog2 = new ProgressToken(progress.toString());
assertEquals(prog2.getDistributionBitCount(), distBits);
assertEquals(prog2.getTotalBucketCount(), 3);
assertEquals(prog2.getFinishedBucketCount(), 0);
VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, prog2);
assertEquals(prog2.getPendingBucketCount(), 3);
assertFalse(prog2.hasActive());
assertTrue(iter2.hasNext());
assertFalse(iter2.isDone());
assertTrue(iter2.getBucketSource() instanceof VisitorIterator.ExplicitBucketSource);
// The source itself is already drained: all buckets live in the token.
assertFalse(iter2.getBucketSource().hasNext());
VisitorIterator.BucketProgress bp = iter2.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(32, 1234));
assertEquals(bp.getProgress(), new BucketId(36, 1234));
assertEquals(prog2.getPendingBucketCount(), 2);
assertTrue(iter2.hasNext());
assertFalse(iter2.isDone());
bp = iter2.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(32, 8009));
assertEquals(bp.getProgress(), new BucketId());
assertEquals(prog2.getPendingBucketCount(), 1);
assertTrue(iter2.hasNext());
assertFalse(iter2.isDone());
bp = iter2.getNext();
assertEquals(prog2.getPendingBucketCount(), 0);
assertEquals(bp.getSuperbucket(), new BucketId(32, 6789));
assertEquals(bp.getProgress(), new BucketId());
assertFalse(iter2.hasNext());
assertFalse(iter2.isDone());
assertEquals(prog2.getActiveBucketCount(), 3);
}
// Continue the original iteration: the partially-visited bucket is
// handed out again with its recorded progress, then finished.
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
bp1 = iter.getNext();
assertEquals(bp1.getSuperbucket(), new BucketId(32, 1234));
assertEquals(bp1.getProgress(), new BucketId(36, 1234));
iter.update(bp1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
bp1 = iter.getNext();
assertEquals(bp1.getSuperbucket(), new BucketId(32, 6789));
assertEquals(bp1.getProgress(), new BucketId());
// Only the two not-yet-finished buckets remain in the serialized form.
assertEquals(
progress.toString(),
"VDS bucket progress file (" + progress.percentFinished() + "% completed)\n" +
"16\n" +
"0\n" +
"1\n" +
"3\n" +
"8000000000001f49:0\n" +
"8000000000001a85:0\n");
iter.update(bp1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(progress.getPendingBucketCount(), 0);
assertEquals(progress.getActiveBucketCount(), 1);
// Finishing the last active bucket completes the visit.
iter.update(bp2.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertFalse(iter.hasNext());
assertTrue(iter.isDone());
assertTrue(progress.isFinished());
assertEquals(progress.getActiveBucketCount(), 0);
{
// Finished explicit token: cursor stays 0, finished == total == 3.
StringBuilder finished = new StringBuilder();
finished.append("VDS bucket progress file (100.0% completed)\n");
finished.append(distBits);
finished.append('\n');
finished.append(0);
finished.append('\n');
finished.append(3);
finished.append('\n');
finished.append(3);
finished.append('\n');
assertEquals(finished.toString(), progress.toString());
}
}
/**
 * Calling update() repeatedly on the same superbucket, without fetching it
 * again via getNext() in between, must keep working: each update replaces
 * the recorded sub-bucket progress, and the next fetch resumes from the
 * most recently reported position.
 */
@Test
public void testActiveUpdate() throws ParseException {
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.group = \"yahoo.com\"", idFactory, 16, token);

    VisitorIterator.BucketProgress entry = iterator.getNext();
    assertEquals(token.getPendingBucketCount(), 0);
    assertEquals(token.getActiveBucketCount(), 1);

    BucketId superbucket = entry.getSuperbucket();
    int usedBits = superbucket.getUsedBits();

    // First update: the bucket moves from active back to pending.
    iterator.update(superbucket, new BucketId(usedBits + 2, superbucket.getId() | (2L << usedBits)));
    assertEquals(token.getPendingBucketCount(), 1);
    assertEquals(token.getActiveBucketCount(), 0);

    // Second update without an intervening getNext(): counts are unchanged,
    // only the recorded progress is replaced.
    iterator.update(superbucket, new BucketId(usedBits + 2, superbucket.getId() | (1L << usedBits)));
    assertEquals(token.getPendingBucketCount(), 1);
    assertEquals(token.getActiveBucketCount(), 0);

    // The re-fetched bucket carries the progress from the last update.
    entry = iterator.getNext();
    assertEquals(entry.getSuperbucket(), superbucket);
    assertEquals(entry.getProgress(), new BucketId(usedBits + 2, superbucket.getId() | (1L << usedBits)));
    assertEquals(token.getPendingBucketCount(), 0);
    assertEquals(token.getActiveBucketCount(), 1);
}
/**
 * Reporting the null bucket (or the superbucket itself) as progress must
 * simply requeue the superbucket as pending, and the requeued entry must
 * echo back exactly the progress value that was reported.
 */
@Test
public void testNullAndSuperUpdate() throws ParseException {
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.group = \"yahoo.com\"", idFactory, 16, token);
    assertEquals(token.getPendingBucketCount(), 1);

    VisitorIterator.BucketProgress fetched = iterator.getNext();
    assertEquals(fetched.getProgress(), new BucketId());
    BucketId superbucket = fetched.getSuperbucket();
    BucketId initialProgress = fetched.getProgress();
    assertFalse(iterator.hasNext());
    assertFalse(iterator.isDone());
    assertEquals(token.getPendingBucketCount(), 0);
    assertEquals(token.getActiveBucketCount(), 1);

    // update() with the null bucket requeues the superbucket as pending.
    iterator.update(superbucket, ProgressToken.NULL_BUCKET);
    assertTrue(iterator.hasNext());
    assertFalse(iterator.isDone());
    assertEquals(token.getPendingBucketCount(), 1);
    assertEquals(token.getActiveBucketCount(), 0);

    VisitorIterator.BucketProgress requeued = iterator.getNext();
    assertEquals(requeued.getSuperbucket(), superbucket);
    assertEquals(requeued.getProgress(), ProgressToken.NULL_BUCKET);
    assertEquals(token.getPendingBucketCount(), 0);
    assertEquals(token.getActiveBucketCount(), 1);

    // update() with the superbucket itself behaves the same way.
    iterator.update(superbucket, superbucket);
    assertTrue(iterator.hasNext());
    assertFalse(iterator.isDone());
    assertEquals(token.getPendingBucketCount(), 1);
    assertEquals(token.getActiveBucketCount(), 0);

    requeued = iterator.getNext();
    assertEquals(requeued.getSuperbucket(), superbucket);
    assertEquals(requeued.getProgress(), superbucket);
    assertEquals(token.getPendingBucketCount(), 0);
    assertEquals(token.getActiveBucketCount(), 1);
}
/**
 * A progress file without a percentage header whose cursor, finished and
 * total counts are all equal must deserialize as a finished token, and
 * serialize()/deserialize must round-trip that finished state.
 */
@Test
public void testDeserializedFinishedProgress() {
    long buckets = 1L << 17;
    String serialized = "VDS bucket progress file\n"
            + 17 + "\n"
            + buckets + "\n"
            + buckets + "\n"
            + buckets + "\n";

    ProgressToken token = new ProgressToken(serialized);
    assertEquals(token.getDistributionBitCount(), 17);
    assertEquals(token.getTotalBucketCount(), buckets);
    assertEquals(token.getFinishedBucketCount(), buckets);
    assertEquals(token.getBucketCursor(), buckets);
    assertTrue(token.isFinished());

    // Round-trip through serialize() preserves the finished state.
    ProgressToken copy = new ProgressToken(token.serialize());
    assertEquals(17, copy.getDistributionBitCount());
    assertEquals(buckets, copy.getTotalBucketCount());
    assertEquals(buckets, copy.getFinishedBucketCount());
    assertEquals(buckets, copy.getBucketCursor());
    assertTrue(copy.isFinished());
}
/**
 * progressFraction() maps a sub-bucket position within a superbucket to a
 * completion fraction in [0, 1]. Local names encode "how far" and the
 * split depth, e.g. quarterDepth2 is 25% through at split depth 2.
 */
@Test
public void testBucketProgressFraction() {
    double delta = 0.00001;
    BucketId none = new BucketId();
    BucketId fullDepth0 = new BucketId(16, 1234);
    BucketId halfDepth1 = new BucketId(17, 1234);
    BucketId fullDepth1 = new BucketId(17, 1234 | (1 << 16));
    BucketId quarterDepth2 = new BucketId(18, 1234);
    BucketId halfDepth2 = new BucketId(18, 1234 | (2 << 16));
    BucketId threeQuartersDepth2 = new BucketId(18, 1234 | (1 << 16));
    BucketId fullDepth2 = new BucketId(18, 1234 | (3 << 16));

    ProgressToken token = new ProgressToken(16);
    BucketId superbucket = new BucketId(16, 1234);

    // The null progress bucket means nothing has been visited yet.
    assertEquals(token.progressFraction(new BucketId(32, 1234), none), 0.0, delta);
    assertEquals(token.progressFraction(superbucket, fullDepth0), 1.0, delta);
    assertEquals(token.progressFraction(superbucket, halfDepth1), 0.5, delta);
    assertEquals(token.progressFraction(superbucket, fullDepth1), 1.0, delta);
    assertEquals(token.progressFraction(superbucket, quarterDepth2), 0.25, delta);
    assertEquals(token.progressFraction(superbucket, halfDepth2), 0.5, delta);
    assertEquals(token.progressFraction(superbucket, threeQuartersDepth2), 0.75, delta);
    assertEquals(token.progressFraction(superbucket, fullDepth2), 1.0, delta);
    // Raw bucket ids with the sign bit set must not trip up the math.
    assertEquals(token.progressFraction(new BucketId(0x8000000000000000L),
            new BucketId(0xb0000fff00000000L)), 1.0, delta);
}
/**
 * percentFinished() must account both for fully finished superbuckets and
 * for fractional progress inside partially visited ones. With 4
 * distribution bits each of the 16 superbuckets contributes 6.25%.
 */
@Test
public void testProgressEstimation() throws ParseException {
    int distBits = 4;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, distBits, token);
    assertEquals(token.getDistributionBitCount(), 4);
    double delta = 0.00001;
    assertEquals(token.percentFinished(), 0, delta);

    // One of 16 superbuckets done => 6.25%.
    VisitorIterator.BucketProgress first = iterator.getNext();
    iterator.update(first.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertEquals(token.percentFinished(), 6.25, delta);
    assertEquals(token.getFinishedBucketCount(), 1);

    VisitorIterator.BucketProgress second = iterator.getNext();
    VisitorIterator.BucketProgress third = iterator.getNext();
    VisitorIterator.BucketProgress fourth = iterator.getNext();

    // Two full buckets => 12.5%.
    iterator.update(second.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertEquals(token.percentFinished(), 12.5, delta);
    assertEquals(token.getFinishedBucketCount(), 2);

    // Partial progress adds 75% of one bucket's share: 12.5 + 4.6875.
    iterator.update(third.getSuperbucket(), new BucketId(distBits + 2, third.getSuperbucket().getId() | (1 << distBits)));
    assertEquals(token.percentFinished(), 17.1875, delta);
    assertEquals(token.getFinishedBucketCount(), 2);

    // And 25% of another bucket's share: 17.1875 + 1.5625 == 18.75.
    iterator.update(fourth.getSuperbucket(), new BucketId(distBits + 2, fourth.getSuperbucket().getId()));
    assertEquals(token.percentFinished(), 18.75, delta);
    assertEquals(token.getFinishedBucketCount(), 2);

    // Finishing both partials lands exactly on 4/16 == 25%.
    iterator.update(fourth.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    iterator.update(third.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertEquals(token.percentFinished(), 25, delta);
    assertEquals(token.getFinishedBucketCount(), 4);

    // Drain the rest; the estimate must end at exactly 100%.
    while (iterator.hasNext()) {
        VisitorIterator.BucketProgress entry = iterator.getNext();
        iterator.update(entry.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    }
    assertEquals(token.getFinishedBucketCount(), 16);
    assertEquals(token.percentFinished(), 100, delta);
}
/**
 * BucketKeyWrapper must order keys as unsigned 64-bit values, so keys with
 * the sign bit set compare greater than any non-negative key, and equal
 * keys compare as equal.
 */
@Test
public void testBucketKeyWrapperOrdering() {
    long[] ascending = {
            0x0000000000000001L,
            0x7FFFFFFFFFFFFFFFL,
            0x8000000000000000L,
            0xFFFFFFFFFFFFFFFFL
    };
    // Every adjacent pair must order strictly, in both directions.
    for (int i = 0; i + 1 < ascending.length; ++i) {
        ProgressToken.BucketKeyWrapper lower = new ProgressToken.BucketKeyWrapper(ascending[i]);
        ProgressToken.BucketKeyWrapper higher = new ProgressToken.BucketKeyWrapper(ascending[i + 1]);
        assertTrue(lower.compareTo(higher) < 0);
        assertTrue(higher.compareTo(lower) > 0);
    }
    // Identical keys compare as equal, on either side of the sign bit.
    assertTrue(new ProgressToken.BucketKeyWrapper(0x7FFFFFFFFFFFFFFFL)
            .compareTo(new ProgressToken.BucketKeyWrapper(0x7FFFFFFFFFFFFFFFL)) == 0);
    assertTrue(new ProgressToken.BucketKeyWrapper(0x8000000000000000L)
            .compareTo(new ProgressToken.BucketKeyWrapper(0x8000000000000000L)) == 0);
}
/**
 * Helper: for the given distribution bit count, verifies that
 * makeNthBucketKey() reproduces the n'th key of the sorted (unsigned,
 * via BucketKeyWrapper) key ordering of all buckets at that bit count.
 */
private void doTestBucketKeyGeneration(int distBits) {
    int bucketCount = 1 << distBits;
    // Collect and sort all bucket keys in the wrapper's unsigned order.
    ProgressToken.BucketKeyWrapper[] sortedKeys = new ProgressToken.BucketKeyWrapper[bucketCount];
    for (int i = 0; i < bucketCount; ++i) {
        sortedKeys[i] = new ProgressToken.BucketKeyWrapper(
                ProgressToken.bucketToKey(new BucketId(distBits, i).getId()));
    }
    Arrays.sort(sortedKeys);
    // makeNthBucketKey must match the sorted ordering position for position.
    boolean allMatch = true;
    for (int n = 0; n < bucketCount; ++n) {
        if (ProgressToken.makeNthBucketKey(n, distBits) != sortedKeys[n].getKey()) {
            allMatch = false;
            break;
        }
    }
    assertTrue(allMatch);
}
/**
 * Verifies n'th-bucket-key generation exhaustively for every distribution
 * bit count small enough to enumerate all buckets quickly.
 */
@Test
public void testBucketKeyGeneration() {
    for (int distBits = 1; distBits < 14; ++distBits) {
        doTestBucketKeyGeneration(distBits);
    }
}
/**
 * Splitting a pending superbucket must hand out its two children in turn,
 * and both children must inherit the sub-bucket progress that was
 * recorded for the parent before the split.
 */
@Test
public void testSingleBucketSplits() throws ParseException {
int db = 2;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
// Requeue the first superbucket with no progress, then split it: the
// two children (db+1 bits) are handed out with empty progress.
VisitorIterator.BucketProgress bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db, 0));
iter.update(bp.getSuperbucket(), new BucketId());
assertEquals(p.getPendingBucketCount(), 1);
p.splitPendingBucket(new BucketId(db, 0));
assertEquals(p.getPendingBucketCount(), 2);
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 0));
assertEquals(bp.getProgress(), new BucketId(0));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 4));
assertEquals(bp.getProgress(), new BucketId(0));
// Same split, but with recorded sub-bucket progress (db+3, 0x12):
// both children inherit that progress unchanged.
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db, 2));
iter.update(bp.getSuperbucket(), new BucketId(db + 3, 0x12));
assertEquals(p.getPendingBucketCount(), 1);
p.splitPendingBucket(new BucketId(db, 2));
assertEquals(p.getPendingBucketCount(), 2);
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 2));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x12));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 6));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x12));
// And once more for the last superbucket with progress (db+3, 0x15).
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db, 1));
iter.update(bp.getSuperbucket(), new BucketId(db + 3, 0x15));
assertEquals(p.getPendingBucketCount(), 1);
p.splitPendingBucket(new BucketId(db, 1));
assertEquals(p.getPendingBucketCount(), 2);
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 1));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x15));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 5));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x15));
}
/**
 * Test increasing the distribution bits for a full bucket space range
 * source with no finished, active or pending buckets: the total count
 * doubles and iteration yields the widened buckets in bit-reversed order.
 *
 * @throws ParseException upon docsel parse failure (shouldn't happen)
 */
@Test
public void testRangeDistributionBitIncrease1NoPending() throws ParseException {
    int db = 2;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, token);
    assertEquals(token.getTotalBucketCount(), 4);

    // One extra distribution bit doubles the bucket space everywhere.
    iterator.setDistributionBitCount(db + 1);
    assertEquals(token.getTotalBucketCount(), 8);
    assertEquals(token.getDistributionBitCount(), db + 1);
    assertEquals(iterator.getDistributionBitCount(), db + 1);
    assertEquals(iterator.getBucketSource().getDistributionBitCount(), db + 1);

    // Buckets arrive in bit-reversed counting order over the widened space.
    int[] expectedOrder = { 0, 4, 2, 6, 1, 5, 3, 7 };
    for (int expected : expectedOrder) {
        assertEquals(iterator.getNext().getSuperbucket(), new BucketId(db + 1, expected));
    }
}
/**
 * Increasing the distribution bit count by one while buckets exist in
 * several states (finished and pending): the finished, pending and total
 * counts all double, and the split pending buckets are drained before
 * iteration continues over the rest of the widened space.
 */
@Test
public void testRangeDistributionBitIncrease1AllBucketStates() throws ParseException {
int db = 3;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
// Finish one bucket, then put three more back as pending (no progress).
VisitorIterator.BucketProgress bp = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[3];
bpp[0] = iter.getNext();
bpp[1] = iter.getNext();
bpp[2] = iter.getNext();
iter.update(bpp[0].getSuperbucket(), new BucketId());
iter.update(bpp[1].getSuperbucket(), new BucketId());
iter.update(bpp[2].getSuperbucket(), new BucketId());
assertEquals(p.getFinishedBucketCount(), 1);
assertEquals(p.getPendingBucketCount(), 3);
assertEquals(p.getActiveBucketCount(), 0);
// One extra bit doubles everything: 1 finished -> 2, 3 pending -> 6,
// total 8 -> 16.
iter.setDistributionBitCount(db + 1);
assertEquals(p.getTotalBucketCount(), 16);
assertEquals(p.getFinishedBucketCount(), 2);
assertEquals(p.getPendingBucketCount(), 6);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getDistributionBitCount(), db + 1);
assertEquals(iter.getDistributionBitCount(), db + 1);
assertEquals(iter.getBucketSource().getDistributionBitCount(), db + 1);
// The six split pending buckets are handed out first...
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x04));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0C));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x02));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0A));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x06));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0E));
assertEquals(p.getPendingBucketCount(), 0);
// ...then iteration continues over the remaining widened buckets.
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x01));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x09));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x05));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0D));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x03));
}
/**
 * Raising the distribution bit count by several bits (16 -> 20) while
 * buckets are pending and active: the token keeps the old bit count and
 * the source yields until the active bucket is reported back, after which
 * all state is converted to the new bit count in one go (each 16-bit
 * bucket corresponds to 16 20-bit buckets).
 */
@Test
public void testRangeDistributionIncreaseMultipleBits() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
// Finish three buckets, leave two pending and one active.
for (int i = 0; i < 3; ++i) {
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[2];
bpp[0] = iter.getNext();
bpp[1] = iter.getNext();
VisitorIterator.BucketProgress bpa = iter.getNext();
iter.update(bpp[0].getSuperbucket(), new BucketId());
iter.update(bpp[1].getSuperbucket(), new BucketId());
iter.setDistributionBitCount(20);
// Conversion is deferred while a bucket is still active: the token
// keeps 16 bits, the source reports 20 and yields instead of handing
// out further buckets.
assertEquals(p.getDistributionBitCount(), 16);
assertEquals(iter.getDistributionBitCount(), 20);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 20);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertTrue(iter.getBucketSource().shouldYield());
assertEquals(p.getPendingBucketCount(), 2);
assertEquals(p.getActiveBucketCount(), 1);
// Finishing the active bucket triggers the conversion: 2 pending
// 16-bit buckets become 32, 4 finished become 64, cursor 6 becomes 96.
iter.update(bpa.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertEquals(p.getDistributionBitCount(), 20);
assertEquals(p.getPendingBucketCount(), 32);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getBucketCursor(), 96);
assertEquals(p.getFinishedBucketCount(), 16 * 4);
// The 32 pending buckets are drained in key order before the source
// resumes at the cursor position.
for (int i = 0; i < 32; ++i) {
long testKey = ProgressToken.makeNthBucketKey(i + 48, 20);
VisitorIterator.BucketProgress bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(ProgressToken.keyToBucketId(testKey)));
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 16 * 6);
assertEquals(iter.getNext().getSuperbucket(), new BucketId(20, 0x6000));
}
/**
 * Merging the left child of a previously split pending bucket must join
 * the sibling pair back into the single original superbucket.
 */
@Test
public void testSingleBucketMerge() throws ParseException {
    int db = 2;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken token = new ProgressToken();
    VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, db, token);

    // Requeue the first superbucket as pending, then split it in two.
    VisitorIterator.BucketProgress entry = iterator.getNext();
    iterator.update(entry.getSuperbucket(), new BucketId());
    token.splitPendingBucket(new BucketId(db, 0));
    assertEquals(token.getPendingBucketCount(), 2);

    // Merging via the left (db+1 bit) child restores the original bucket.
    token.mergePendingBucket(new BucketId(db + 1, 0));
    assertEquals(token.getPendingBucketCount(), 1);
    entry = iterator.getNext();
    assertEquals(entry.getSuperbucket(), new BucketId(db, 0));
}
/**
 * Decreasing the distribution bit count by one: a lossless reset is only
 * possible before any progress exists; once buckets are in flight the
 * conversion waits for the active bucket to be reported back, then merges
 * all state down to the coarser bucket space.
 */
@Test
public void testRangeDistributionBitDecrease1() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
// Fresh iteration: no progress yet, so a lossless reset is possible.
assertTrue(src.isLosslessResetPossible());
for (int i = 0; i < 3; ++i) {
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
// Once buckets have finished, resetting would lose progress.
assertFalse(src.isLosslessResetPossible());
// Leave six buckets pending and one active.
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[6];
for (int i = 0; i < 6; ++i) {
bpp[i] = iter.getNext();
}
VisitorIterator.BucketProgress bpa = iter.getNext();
for (int i = 0; i < 6; ++i) {
iter.update(bpp[i].getSuperbucket(), new BucketId());
}
assertEquals(p.getBucketCursor(), 10);
// While a bucket is active the token keeps the old bit count and the
// source yields instead of handing out buckets.
iter.setDistributionBitCount(db - 1);
assertEquals(iter.getDistributionBitCount(), db - 1);
assertEquals(p.getDistributionBitCount(), db);
assertEquals(iter.getBucketSource().getDistributionBitCount(), db - 1);
assertTrue(iter.getBucketSource().shouldYield());
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getActiveBucketCount(), 1);
// Reporting the active bucket back triggers the merge down: roughly
// half the counts remain (3 finished -> 1, cursor 10 -> 5, and the
// 6 pending plus the returned bucket collapse to 4 pending).
iter.update(bpa.getSuperbucket(), new BucketId());
assertEquals(p.getDistributionBitCount(), db - 1);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getPendingBucketCount(), 4);
assertEquals(p.getFinishedBucketCount(), 1);
assertEquals(p.getBucketCursor(), 5);
}
// Raising the distribution bit count by 4 (16 -> 20) multiplies pending
// buckets and cursor by 16; lowering it back (20 -> 16) divides them again,
// i.e. increase followed by decrease round-trips the progress state.
@Test
public void testRangeDistributionBitIncreaseDecrease() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
assertTrue(src.isLosslessResetPossible());
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[4];
for (int i = 0; i < 4; ++i) {
bpp[i] = iter.getNext();
}
// Return all four unfinished -> they become pending, none active.
for (int i = 0; i < 4; ++i) {
iter.update(bpp[i].getSuperbucket(), new BucketId());
}
assertFalse(src.isLosslessResetPossible());
iter.setDistributionBitCount(20);
// +4 bits: every pending bucket splits into 2^4 = 16 sub-buckets.
assertEquals(p.getDistributionBitCount(), 20);
assertEquals(p.getPendingBucketCount(), 4 << 4);
assertFalse(iter.getBucketSource().shouldYield());
assertEquals(p.getBucketCursor(), 5 << 4);
iter.setDistributionBitCount(16);
// -4 bits: state collapses back to exactly where it was.
assertEquals(p.getDistributionBitCount(), 16);
assertEquals(p.getPendingBucketCount(), 4);
assertFalse(iter.getBucketSource().shouldYield());
assertEquals(p.getBucketCursor(), 5);
}
// Changing distribution bits twice (11 -> 9 -> 12) while buckets are still
// active: the token must stay consistent through the transitions, and a
// progress string serialized mid-transition must deserialize with the OLD
// bit count and be upgradable afterwards.
@Test
public void testRangeDistributionBitChangeWithoutDone() throws ParseException {
int db = 11;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[4];
for (int i = 0; i < 4; ++i) {
bpp[i] = iter.getNext();
}
// Only the first two become pending; bpp[2] and bpp[3] stay active.
for (int i = 0; i < 2; ++i) {
iter.update(bpp[i].getSuperbucket(), new BucketId());
}
assertFalse(src.isLosslessResetPossible());
iter.setDistributionBitCount(9);
// Token cannot switch yet: two buckets are still active at 11 bits.
assertEquals(p.getDistributionBitCount(), 11);
assertEquals(p.getActiveBucketCount(), 2);
assertEquals(p.getPendingBucketCount(), 2);
assertTrue(iter.getBucketSource().shouldYield());
// Return bpp[3] with partial (15-bit) sub-progress, then change target again.
iter.update(bpp[3].getSuperbucket(), new BucketId(15, bpp[3].getSuperbucket().getId()));
iter.setDistributionBitCount(12);
assertEquals(p.getActiveBucketCount(), 1);
assertEquals(p.getPendingBucketCount(), 3);
assertTrue(iter.getBucketSource().shouldYield());
// Serialize while still inconsistent; must round-trip at the old bit count.
String serialized = p.toString();
iter.update(bpp[2].getSuperbucket(), ProgressToken.FINISHED_BUCKET);
// Last active bucket done -> pending buckets split for 12 bits (x2).
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getPendingBucketCount(), 3 * 2);
assertFalse(iter.getBucketSource().shouldYield());
assertEquals(p.getFinishedBucketCount(), 2);
ProgressToken p2 = new ProgressToken(serialized);
assertEquals(p2.getDistributionBitCount(), 11);
BucketIdFactory idFactory2 = new BucketIdFactory();
VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory2, 1, p2);
assertEquals(iter2.getDistributionBitCount(), 11);
assertEquals(p2.getDistributionBitCount(), 11);
iter2.setDistributionBitCount(12);
assertEquals(p2.getDistributionBitCount(), 12);
assertEquals(p2.getPendingBucketCount(), 8);
assertEquals(p2.getBucketCursor(), 8);
assertEquals(p2.getFinishedBucketCount(), 0);
}
// A large distribution bit drop (31 -> 11) early in the iteration, with
// nothing finished yet, must reset progress completely to zero once the
// last active bucket is returned, restarting cleanly at 11 bits.
@Test
public void testRangeDistributionBitInitialDrop() throws ParseException {
int db = 31;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.BucketProgress[] bp = new VisitorIterator.BucketProgress[3];
bp[0] = iter.getNext();
bp[1] = iter.getNext();
bp[2] = iter.getNext();
iter.update(bp[2].getSuperbucket(), new BucketId());
iter.update(bp[1].getSuperbucket(), new BucketId());
// bp[0] is still active when the bit count drops.
assertEquals(p.getActiveBucketCount(), 1);
iter.setDistributionBitCount(11);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getActiveBucketCount(), 1);
// Returning the last active (31-bit) bucket triggers the full reset.
iter.update(new BucketId(31, 0), new BucketId());
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getBucketCursor(), 0);
bp[0] = iter.getNext();
assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
}
// Lossless reset: with no finished buckets, raising the bit count (1 -> 11)
// resets progress to zero once the active bucket is returned. Also verifies
// the same behavior after a serialize/deserialize round-trip.
@Test
public void testRangeDistributionLosslessReset() throws ParseException {
int db = 1;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
VisitorIterator.BucketProgress[] bp = new VisitorIterator.BucketProgress[2];
bp[0] = iter.getNext();
bp[1] = iter.getNext();
// Serialized while both buckets are outstanding (active).
String serialized = p.toString();
assertFalse(src.isLosslessResetPossible());
iter.update(bp[1].getSuperbucket(), new BucketId());
assertEquals(p.getActiveBucketCount(), 1);
iter.setDistributionBitCount(11);
assertFalse(src.isLosslessResetPossible());
// Token keeps 1 bit until the remaining active bucket comes back.
assertEquals(p.getDistributionBitCount(), 1);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getActiveBucketCount(), 1);
iter.update(new BucketId(1, 0), new BucketId());
// Reset complete: everything zeroed, now at 11 bits.
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getBucketCursor(), 0);
assertEquals(p.getDistributionBitCount(), 11);
bp[0] = iter.getNext();
assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
// Same scenario from the serialized token: reset happens directly on
// setDistributionBitCount since nothing is active after deserialization.
p = new ProgressToken(serialized);
idFactory = new BucketIdFactory();
iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 1, p);
iter.setDistributionBitCount(11);
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getBucketCursor(), 0);
assertEquals(p.getDistributionBitCount(), 11);
bp[0] = iter.getNext();
assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
}
// Explicit (id.user) bucket sources enumerate a fixed set of buckets, so a
// distribution bit increase must NOT split them: counts stay 2 pending /
// 1 finished / 3 total.
@Test
public void testExplicitDistributionBitIncrease() throws ParseException {
int distBits = 12;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, p);
assertEquals(iter.getDistributionBitCount(), distBits);
assertEquals(p.getDistributionBitCount(), distBits);
assertEquals(iter.getBucketSource().getDistributionBitCount(), distBits);
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
iter.setDistributionBitCount(16);
assertEquals(iter.getDistributionBitCount(), 16);
assertEquals(p.getDistributionBitCount(), 16);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 16);
// Bucket counts are unchanged for explicit sources.
assertEquals(p.getPendingBucketCount(), 2);
assertEquals(p.getFinishedBucketCount(), 1);
assertEquals(p.getTotalBucketCount(), 3);
}
// Mirror of testExplicitDistributionBitIncrease: a distribution bit
// decrease (20 -> 16) must likewise leave explicit-source bucket counts
// untouched (2 pending / 1 finished / 3 total).
@Test
public void testExplicitDistributionBitDecrease() throws ParseException {
int distBits = 20;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, p);
assertEquals(iter.getDistributionBitCount(), distBits);
assertEquals(p.getDistributionBitCount(), distBits);
assertEquals(iter.getBucketSource().getDistributionBitCount(), distBits);
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
iter.setDistributionBitCount(16);
assertEquals(iter.getDistributionBitCount(), 16);
assertEquals(p.getDistributionBitCount(), 16);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 16);
// Bucket counts are unchanged for explicit sources.
assertEquals(p.getPendingBucketCount(), 2);
assertEquals(p.getFinishedBucketCount(), 1);
assertEquals(p.getTotalBucketCount(), 3);
}
// Importing a serialized explicit-source token into an iterator created with
// a LOWER bit count (1) must keep the serialized bit count (20) — no
// truncation of the stored progress.
@Test
public void testExplicitDistributionImportNoTruncation() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, 20, p);
assertEquals(20, iter.getDistributionBitCount());
assertEquals(20, p.getDistributionBitCount());
assertEquals(20, iter.getBucketSource().getDistributionBitCount());
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
String serialized = p.toString();
ProgressToken p2 = new ProgressToken(serialized);
BucketIdFactory idFactory2 = new BucketIdFactory();
// Created with 1 bit, but the imported token's 20 bits must win.
VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory2, 1, p2);
assertEquals(20, iter2.getDistributionBitCount());
assertEquals(20, p2.getDistributionBitCount());
assertEquals(20, iter2.getBucketSource().getDistributionBitCount());
assertEquals(2, p2.getPendingBucketCount());
assertEquals(1, p2.getFinishedBucketCount());
assertEquals(3, p2.getTotalBucketCount());
}
// Importing a progress file serialized at 10 distribution bits and upgrading
// the iterator to 12 bits must scale all counters by 2^(12-10) = 4, and the
// upgraded token must survive a serialize/deserialize round-trip unchanged.
@Test
public void testImportProgressWithOutdatedDistribution() throws ParseException {
    // 10 bits, cursor 503, 500 finished, 1024 total, 3 pending buckets.
    String input = "VDS bucket progress file\n" +
            "10\n" +
            "503\n" +
            "500\n" +
            "1024\n" +
            "28000000000000be:0\n" +
            "28000000000002be:0\n" +
            "28000000000001be:0\n";
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken(input);
    assertEquals(10, p.getDistributionBitCount());
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 1, p);
    iter.setDistributionBitCount(12);
    assertEquals(iter.getDistributionBitCount(), 12);
    assertEquals(p.getDistributionBitCount(), 12);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), 12);
    // All counters scaled by 4 (<< 2) for the two extra distribution bits.
    assertEquals(p.getTotalBucketCount(), 1 << 12);
    assertEquals(p.getFinishedBucketCount(), 500 << 2);
    assertEquals(p.getPendingBucketCount(), 3 << 2);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 503 << 2);
    assertTrue(iter.hasNext());
    // Round-trip: the upgraded state must serialize and parse back identically.
    ProgressToken p2 = new ProgressToken(p.serialize());
    assertEquals(p2.getDistributionBitCount(), 12);
    assertEquals(p2.getTotalBucketCount(), 1 << 12);
    assertEquals(p2.getFinishedBucketCount(), 500 << 2);
    assertEquals(p2.getPendingBucketCount(), 3 << 2);
    assertEquals(p2.getActiveBucketCount(), 0);
    assertEquals(p2.getBucketCursor(), 503 << 2);
}
// Imports a progress file whose single pending bucket (at 10 used bits) is
// inconsistent with the file's 7-bit distribution count. Construction of the
// iterator must normalize the pending state to 7 bits (splitting the bucket
// into 2^3 parts), and a subsequent increase to 8 bits must rescale again.
@Test
public void testImportInconsistentProgressIncrease() throws ParseException {
    // 7 bits, cursor 32, 24 finished, 128 total; the pending bucket
    // 100000000000000c uses 10 bits — deeper than the declared 7.
    String input = "VDS bucket progress file\n" +
            "7\n" +
            "32\n" +
            "24\n" +
            "128\n" +
            "100000000000000c:0\n";
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken p = new ProgressToken(input);
    assertEquals(7, p.getDistributionBitCount());
    assertEquals(p.getTotalBucketCount(), 1 << 7);
    assertEquals(p.getFinishedBucketCount(), 24);
    assertEquals(p.getPendingBucketCount(), 1);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(32, p.getBucketCursor());
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.group != \"yahoo.com\"", idFactory, 1, p);
    // Iterator construction normalizes: 1 pending bucket -> 2^3 at 7 bits.
    assertEquals(7, p.getDistributionBitCount());
    assertEquals(p.getPendingBucketCount(), 1 << 3);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(24 + (1 << 3), p.getBucketCursor());
    iter.setDistributionBitCount(8);
    assertEquals(iter.getDistributionBitCount(), 8);
    assertEquals(p.getDistributionBitCount(), 8);
    assertEquals(iter.getBucketSource().getDistributionBitCount(), 8);
    // One extra bit doubles finished count and splits pending once more.
    assertEquals(p.getTotalBucketCount(), 1 << 8);
    assertEquals(p.getFinishedBucketCount(), 24 << 1);
    assertEquals(p.getPendingBucketCount(), 1 << 4);
    assertEquals(p.getActiveBucketCount(), 0);
    assertEquals(p.getBucketCursor(), 24*2 + (1 << 4));
    assertTrue(iter.hasNext());
}
// Companion to testImportInconsistentProgressIncrease: after importing the
// same inconsistent file, DECREASING to 6 bits must halve finished counts
// and rescale the pending bucket accordingly.
@Test
public void testImportInconsistentProgressDecrease() throws ParseException {
String input = "VDS bucket progress file\n" +
"7\n" +
"32\n" +
"24\n" +
"128\n" +
"100000000000000c:0\n";
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken(input);
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 1, p);
assertEquals(iter.getDistributionBitCount(), 7);
iter.setDistributionBitCount(6);
assertEquals(iter.getDistributionBitCount(), 6);
assertEquals(p.getDistributionBitCount(), 6);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 6);
// One bit fewer: finished halves, pending bucket covers 2^2 at 6 bits.
assertEquals(p.getTotalBucketCount(), 1 << 6);
assertEquals(p.getFinishedBucketCount(), 24 >> 1);
assertEquals(p.getPendingBucketCount(), 1 << 2);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getBucketCursor(), 24/2 + (1 << 2));
assertTrue(iter.hasNext());
}
// After returning partially-visited buckets and raising the distribution
// bit count (4 -> 7), iterating to completion must visit every bucket in
// the 7-bit space exactly once — no gaps, no duplicates.
@Test
public void testEntireBucketSpaceCovered() throws ParseException {
int db = 4;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[3];
for (int i = 0; i < 3; ++i) {
bpp[i] = iter.getNext();
}
// Return each with partial sub-progress one level deeper than db.
for (int i = 0; i < 3; ++i) {
iter.update(bpp[i].getSuperbucket(),
new BucketId(db + 1, bpp[i].getSuperbucket().getId()));
}
// Expected coverage set: all 2^7 buckets at the new bit count.
Set<BucketId> buckets = new TreeSet<BucketId>();
db = 7;
for (int i = 0; i < (1 << db); ++i) {
buckets.add(new BucketId(db, i));
}
iter.setDistributionBitCount(db);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getPendingBucketCount(), 3 << 3);
while (iter.hasNext()) {
VisitorIterator.BucketProgress bp = iter.getNext();
// contains + remove together assert "exactly once".
assertTrue(buckets.contains(bp.getSuperbucket()));
buckets.remove(bp.getSuperbucket());
}
assertTrue(buckets.isEmpty());
}
// Creating an iterator with a document selection whose bucket-source type
// (range vs. explicit) does not match the imported progress token must throw
// IllegalArgumentException — tested in both directions.
@Test
public void testExceptionOnWrongDocumentSelection() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
boolean caughtIt = false;
// Explicit-style progress (specific user buckets) + range selection.
try {
ProgressToken p = new ProgressToken("VDS bucket progress file\n16\n3\n1\n3\n"
+ "8000000000001f49:0\n8000000000001a85:0\n");
VisitorIterator.createFromDocumentSelection("id.group != \"yahoo.com\"", idFactory, 16, p);
}
catch (IllegalArgumentException e) {
caughtIt = true;
}
assertTrue(caughtIt);
caughtIt = false;
// Range-style progress + explicit selection.
try {
ProgressToken p = new ProgressToken("VDS bucket progress file\n" +
"10\n" +
"503\n" +
"500\n" +
"1024\n" +
"28000000000000be:0\n" +
"28000000000002be:0\n" +
"28000000000001be:0\n");
VisitorIterator.createFromDocumentSelection("id.group=\"yahoo.com\" or id.user=555", idFactory, 16, p);
}
catch (IllegalArgumentException e) {
caughtIt = true;
}
assertTrue(caughtIt);
}
// isBucketFinished must answer for full 32-bit bucket IDs based on which
// 4-bit superbuckets have been finished: any ID whose low superbucket bits
// match a finished superbucket is reported finished.
@Test
public void testIsBucketFinished() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 4, p);
assertFalse(p.isBucketFinished(new BucketId(32, 0)));
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertTrue(p.isBucketFinished(new BucketId(32, 0)));
assertFalse(p.isBucketFinished(new BucketId(32, 1 << 3)));
VisitorIterator.BucketProgress bp = iter.getNext();
// Merely fetching a bucket must not mark it finished.
assertFalse(p.isBucketFinished(new BucketId(32, 1 << 3)));
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertTrue(p.isBucketFinished(new BucketId(32, 1 << 3)));
// Only the superbucket (low) bits matter, not the rest of the ID.
assertTrue(p.isBucketFinished(new BucketId(32, 0x12345670)));
assertTrue(p.isBucketFinished(new BucketId(32, 0x12345678)));
assertFalse(p.isBucketFinished(new BucketId(32, 0x12345671)));
assertFalse(p.isBucketFinished(new BucketId(32, 0x12345679)));
}
// The token must report an inconsistent state from the moment the
// distribution bit count changes while a bucket is active until that last
// active bucket has been returned.
@Test
public void testInconsistentState() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
for (int i = 0; i < 3; ++i) {
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[2];
bpp[0] = iter.getNext();
bpp[1] = iter.getNext();
// bpa remains active across the bit-count change below.
VisitorIterator.BucketProgress bpa = iter.getNext();
iter.update(bpp[0].getSuperbucket(), new BucketId());
iter.update(bpp[1].getSuperbucket(), new BucketId());
assertFalse(p.isInconsistentState());
iter.setDistributionBitCount(20);
assertTrue(p.isInconsistentState());
iter.update(bpa.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertFalse(p.isInconsistentState());
}
// A progress file whose final bucket entry is truncated (missing the
// progress value after ':') must be rejected with IllegalArgumentException.
@Test
public void testMalformedProgressFile() {
    boolean sawExpectedException = false;
    try {
        new ProgressToken("VDS bucket progress file\n" +
                "10\n" +
                "503\n" +
                "500\n" +
                "1024\n" +
                "28000000000000be:0\n" +
                "28000000000002be:");
    } catch (IllegalArgumentException expected) {
        sawExpectedException = true;
    }
    assertTrue(sawExpectedException);
}
// A progress file that ends before all header fields are present must be
// rejected with IllegalArgumentException.
@Test
public void testFailOnTooFewLinesInFile() {
    boolean sawExpectedException = false;
    try {
        new ProgressToken("VDS bucket progress file\n" +
                "10\n" +
                "503\n");
    } catch (IllegalArgumentException expected) {
        sawExpectedException = true;
    }
    assertTrue(sawExpectedException);
}
// A progress file whose first line is not the expected magic header must be
// rejected with IllegalArgumentException.
@Test
public void testUnknownFirstHeaderLine() {
    boolean sawExpectedException = false;
    try {
        new ProgressToken("Smurf Time 3000\n" +
                "10\n" +
                "503\n" +
                "500\n" +
                "1024\n" +
                "28000000000000be:0\n" +
                "28000000000002be:0");
    } catch (IllegalArgumentException expected) {
        sawExpectedException = true;
    }
    assertTrue(sawExpectedException);
}
// Binary round-trip: text -> token -> serialize() bytes -> token -> toString()
// must reproduce the original textual form exactly (including the completion
// percentage in the header).
@Test
public void testBinaryProgressSerialization() {
String input = "VDS bucket progress file (48.828125% completed)\n" +
"10\n" +
"503\n" +
"500\n" +
"1024\n" +
"28000000000000be:0\n" +
"28000000000002be:0\n" +
"28000000000001be:0\n";
ProgressToken p = new ProgressToken(input);
byte[] buf = p.serialize();
ProgressToken p2 = new ProgressToken(buf);
assertEquals(input, p2.toString());
}
} | class VisitorIteratorTestCase {
// Full lifecycle of a single explicit (id.user) bucket: fetch -> active,
// return with partial sub-progress -> pending again, re-fetch resumes at the
// stored sub-bucket, and finishing it completes the iteration.
// Fix: the final update used b1.getSuperbucket() where b2 was the bucket
// being finished — the values are equal (asserted above), but b2 states the
// intent correctly.
@Test
public void testIterationSingleBucketUpdate() throws ParseException {
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
            "id.user = 1234", idFactory, 1, progress);
    assertFalse(progress.hasActive());
    assertEquals(progress.getPendingBucketCount(), 1);
    assertEquals(progress.getFinishedBucketCount(), 0);
    assertEquals(progress.getTotalBucketCount(), 1);
    assertFalse(iter.isDone());
    assertTrue(iter.hasNext());
    assertEquals(iter.getRemainingBucketCount(), 1);
    VisitorIterator.BucketProgress b1 = iter.getNext();
    assertEquals(b1.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(b1.getProgress(), new BucketId());
    assertFalse(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(iter.getRemainingBucketCount(), 1);
    assertEquals(progress.getActiveBucketCount(), 1);
    assertFalse(progress.hasPending());
    // Return with sub-progress one level deeper -> bucket goes back to pending.
    BucketId sub = new BucketId(b1.getSuperbucket().getUsedBits() + 1, b1.getSuperbucket().getId());
    iter.update(b1.getSuperbucket(), sub);
    assertFalse(progress.hasActive());
    assertEquals(progress.getPendingBucketCount(), 1);
    assertTrue(iter.hasNext());
    assertFalse(iter.isDone());
    assertEquals(iter.getRemainingBucketCount(), 1);
    // Re-fetching resumes at the stored sub-progress.
    VisitorIterator.BucketProgress b2 = iter.getNext();
    assertEquals(b2.getSuperbucket(), new BucketId(32, 1234));
    assertEquals(b2.getProgress(), new BucketId(33, 1234));
    assertFalse(iter.hasNext());
    assertEquals(progress.getActiveBucketCount(), 1);
    assertFalse(progress.hasPending());
    // Finish the bucket fetched as b2 (equal to b1's superbucket).
    iter.update(b2.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
    assertFalse(progress.hasActive());
    assertFalse(progress.hasPending());
    assertFalse(iter.hasNext());
    assertTrue(iter.isDone());
    assertTrue(progress.isFinished());
    assertEquals(progress.getFinishedBucketCount(), 1);
    assertEquals(iter.getRemainingBucketCount(), 0);
}
// Invalid slicing parameters must be rejected with IllegalArgumentException
// carrying a descriptive message.
// Fix: the original try-blocks would pass silently if NO exception was
// thrown; each case now asserts via a caught-flag (same pattern as
// testExceptionOnWrongDocumentSelection).
@Test
public void testInvalidSlicing() throws ParseException {
    int distBits = 4;
    BucketIdFactory idFactory = new BucketIdFactory();
    ProgressToken progress = new ProgressToken();
    // slices == 0 is not allowed.
    boolean caughtIt = false;
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 0, 0);
    }
    catch (IllegalArgumentException e) {
        assertEquals("slices must be positive, but was 0", e.getMessage());
        caughtIt = true;
    }
    assertTrue(caughtIt);
    // sliceId must be strictly less than slices.
    caughtIt = false;
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 1, 1);
    }
    catch (IllegalArgumentException e) {
        assertEquals("sliceId must be in [0, 1), but was 1", e.getMessage());
        caughtIt = true;
    }
    assertTrue(caughtIt);
    // sliceId must be non-negative.
    caughtIt = false;
    try {
        VisitorIterator.createFromDocumentSelection(
                "id.group != \"yahoo.com\"", idFactory, distBits, progress, 1, -1);
    }
    catch (IllegalArgumentException e) {
        assertEquals("sliceId must be in [0, 1), but was -1", e.getMessage());
        caughtIt = true;
    }
    assertTrue(caughtIt);
}
// For every slice count up to 2x the bucket space, the slices together must
// cover all buckets exactly once, each slice visiting its fair
// ceil-distributed share; intermediate state is checked halfway through.
// Fix: the method carried a duplicate @Test annotation — duplicating a
// non-repeatable annotation type is a compile-time error; removed one.
@Test
public void testValidSlicing() throws ParseException {
    int distBits = 4;
    long buckets = 1 << distBits;
    BucketIdFactory idFactory = new BucketIdFactory();
    for (int slices = 1; slices <= 2 * buckets; slices++) {
        long bucketsTotal = 0;
        for (int sliceId = 0; sliceId < slices; sliceId++) {
            ProgressToken progress = new ProgressToken();
            VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
                    "id.group != \"yahoo.com\"", idFactory, distBits, progress, slices, sliceId);
            // Assertion message pinpointing the failing combination.
            String context = "slices: " + slices + ", sliceId: " + sliceId;
            assertEquals(context, progress.getDistributionBitCount(), distBits);
            assertTrue(context, iter.getBucketSource() instanceof VisitorIterator.DistributionRangeBucketSource);
            assertEquals(context, progress.getFinishedBucketCount(), Math.min(buckets, sliceId));
            assertEquals(context, progress.getTotalBucketCount(), buckets);
            long bucketCount = 0;
            // Phase 1: iterate until half of the bucket space is finished.
            while (iter.hasNext() && progress.getFinishedBucketCount() < buckets / 2) {
                VisitorIterator.BucketProgress ids = iter.getNext();
                iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
                ++bucketCount;
                ++bucketsTotal;
            }
            if (slices + sliceId < buckets) {
                // Mid-way invariants only hold when the slice has work left.
                assertEquals(context, ((buckets / 2) + slices - sliceId - 1) / slices, bucketCount);
                assertFalse(context, progress.hasActive());
                assertFalse(context, progress.hasPending());
                assertFalse(context, iter.isDone());
                assertTrue(context, iter.hasNext());
                assertEquals(context, progress.getFinishedBucketCount(), bucketCount * slices + sliceId);
                assertFalse(context, progress.isFinished());
            }
            // Phase 2: drain the slice to completion.
            while (iter.hasNext()) {
                VisitorIterator.BucketProgress ids = iter.getNext();
                iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
                ++bucketCount;
                ++bucketsTotal;
            }
            // Each slice handles ceil((buckets - sliceId) / slices) buckets.
            assertEquals(context, (buckets + slices - sliceId - 1) / slices, bucketCount);
            assertFalse(context, progress.hasActive());
            assertFalse(context, progress.hasPending());
            assertTrue(context, iter.isDone());
            assertFalse(context, iter.hasNext());
            assertEquals(context, progress.getFinishedBucketCount(), buckets);
            assertTrue(context, progress.isFinished());
        }
        // All slices together cover the full bucket space exactly once.
        assertEquals("slices: " + slices, buckets, bucketsTotal);
    }
}
// Exercises textual serialization of a range-source token through three
// phases: (1) a clean half-finished state, (2) a state with both pending
// and active buckets (active buckets serialize as pending), and (3) the
// fully-finished state. Each serialized form is checked against a
// hand-built expectation and re-imported into a fresh iterator.
@Test
public void testProgressSerializationRange() throws ParseException {
int distBits = 4;
int buckets = 1 << distBits;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, distBits, progress);
assertEquals(progress.getDistributionBitCount(), distBits);
assertTrue(iter.getBucketSource() instanceof VisitorIterator.DistributionRangeBucketSource);
assertEquals(progress.getFinishedBucketCount(), 0);
assertEquals(progress.getTotalBucketCount(), buckets);
// Phase 1: finish exactly half the buckets.
long bucketCount = 0;
long bucketStop = buckets / 2;
while (iter.hasNext() && bucketCount != bucketStop) {
VisitorIterator.BucketProgress ids = iter.getNext();
iter.update(ids.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
++bucketCount;
}
assertEquals(bucketCount, bucketStop);
assertFalse(progress.hasActive());
assertFalse(progress.hasPending());
assertFalse(iter.isDone());
assertTrue(iter.hasNext());
assertEquals(progress.getFinishedBucketCount(), bucketCount);
assertFalse(progress.isFinished());
// Expected serialized form: header, distBits, cursor, finished, total.
StringBuilder desired = new StringBuilder();
desired.append("VDS bucket progress file (50.0% completed)\n");
desired.append(distBits);
desired.append('\n');
desired.append(bucketCount);
desired.append('\n');
desired.append(bucketCount);
desired.append('\n');
desired.append(buckets);
desired.append('\n');
assertEquals(desired.toString(), progress.toString());
BucketIdFactory idFactory2 = new BucketIdFactory();
{
// Re-import phase-1 state; iteration must resume at the next key.
ProgressToken progDs = new ProgressToken(progress.toString());
assertEquals(progDs.getDistributionBitCount(), distBits);
assertEquals(progDs.getTotalBucketCount(), buckets);
assertEquals(progDs.getFinishedBucketCount(), bucketCount);
VisitorIterator iterDs = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory2, 1, progDs);
assertFalse(progDs.hasPending());
assertFalse(progDs.hasActive());
assertTrue(iterDs.hasNext());
assertFalse(iterDs.isDone());
assertEquals(distBits, iterDs.getDistributionBitCount());
assertEquals(distBits, progDs.getDistributionBitCount());
VisitorIterator.BucketProgress idDs = iterDs.getNext();
long resumeKey = ProgressToken.makeNthBucketKey(bucketCount, distBits);
assertEquals(idDs.getSuperbucket(), new BucketId(ProgressToken.keyToBucketId(resumeKey)));
assertEquals(idDs.getProgress(), new BucketId());
}
// Phase 2: fetch more buckets; return partial progress for the first
// activeTotal of them (making them pending), leave the rest active.
int pendingTotal = buckets / 8;
int activeTotal = buckets / 8;
Vector<VisitorIterator.BucketProgress> trackedBuckets = new Vector<VisitorIterator.BucketProgress>();
for (int i = 0; i < pendingTotal + activeTotal; ++i) {
trackedBuckets.add(iter.getNext());
}
for (int i = 0; i < pendingTotal + activeTotal; ++i) {
VisitorIterator.BucketProgress idTemp = trackedBuckets.get(i);
if (i < activeTotal) {
iter.update(idTemp.getSuperbucket(),
new BucketId(distBits + 2, idTemp.getSuperbucket().getId() | (2 * buckets)));
}
}
assertEquals(progress.getActiveBucketCount(), activeTotal);
assertEquals(progress.getPendingBucketCount(), pendingTotal);
// Expected form now includes one "bucket:progress" hex line per entry.
desired = new StringBuilder();
desired.append("VDS bucket progress file (").append(progress.percentFinished()).append("% completed)\n");
desired.append(distBits);
desired.append('\n');
desired.append(bucketCount + pendingTotal + activeTotal);
desired.append('\n');
desired.append(bucketCount);
desired.append('\n');
desired.append(buckets);
desired.append('\n');
assertEquals(progress.getBuckets().entrySet().size(), pendingTotal + activeTotal);
for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry
: progress.getBuckets().entrySet()) {
desired.append(Long.toHexString(ProgressToken.keyToBucketId(entry.getKey().getKey())));
desired.append(':');
desired.append(Long.toHexString(entry.getValue().getProgress().getRawId()));
desired.append('\n');
}
assertEquals(progress.toString(), desired.toString());
{
// Re-import phase-2 state: active buckets come back as pending.
ProgressToken progDs = new ProgressToken(progress.toString());
assertEquals(progDs.getDistributionBitCount(), distBits);
assertEquals(progDs.getTotalBucketCount(), buckets);
assertEquals(progDs.getFinishedBucketCount(), bucketCount);
VisitorIterator iterDs = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory2, 1, progDs);
assertEquals(progDs.getPendingBucketCount(), pendingTotal + activeTotal);
assertEquals(distBits, progDs.getDistributionBitCount());
assertEquals(distBits, iterDs.getDistributionBitCount());
assertFalse(progDs.hasActive());
assertTrue(iterDs.hasNext());
assertFalse(iterDs.isDone());
assertEquals(progDs.getBucketCursor(), bucketCount + pendingTotal + activeTotal);
}
// Phase 3: finish the still-active buckets, then drain the iterator;
// hasNext() must stay consistent with isDone() throughout.
for (int i = activeTotal; i < activeTotal + pendingTotal; ++i) {
iter.update(trackedBuckets.get(i).getSuperbucket(), ProgressToken.FINISHED_BUCKET);
++bucketCount;
}
assertEquals(progress.getActiveBucketCount(), 0);
boolean consistentNext = true;
while (!iter.isDone()) {
if (!iter.hasNext()) {
consistentNext = false;
break;
}
VisitorIterator.BucketProgress bp = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
++bucketCount;
}
assertTrue(consistentNext);
assertFalse(iter.hasNext());
assertTrue(progress.isFinished());
assertEquals(bucketCount, buckets);
// Finished form: no bucket lines, cursor == finished == total.
StringBuilder finished = new StringBuilder();
finished.append("VDS bucket progress file (100.0% completed)\n");
finished.append(distBits);
finished.append('\n');
finished.append(buckets);
finished.append('\n');
finished.append(buckets);
finished.append('\n');
finished.append(buckets);
finished.append('\n');
assertEquals(progress.toString(), finished.toString());
}
/**
 * Serialization round-trip for an explicit bucket source (three id.user
 * buckets): verifies the exact textual progress format while buckets are in
 * mixed pending/active states, that a token deserialized from that text
 * resumes iteration with equivalent state, and the final 100%-completed form.
 */
@Test
public void testProgressSerializationExplicit() throws ParseException {
int distBits = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, progress);
assertEquals(progress.getDistributionBitCount(), distBits);
assertTrue(iter.getBucketSource() instanceof VisitorIterator.ExplicitBucketSource);
assertEquals(progress.getFinishedBucketCount(), 0);
assertEquals(progress.getTotalBucketCount(), 3);
assertEquals(progress.getPendingBucketCount(), 3);
// Fetch two buckets (both become active); user 1234 is then updated with
// sub-bucket progress, moving it back to pending, while 8009 stays active.
VisitorIterator.BucketProgress bp1 = iter.getNext();
VisitorIterator.BucketProgress bp2 = iter.getNext();
assertEquals(progress.getPendingBucketCount(), 1);
assertEquals(progress.getActiveBucketCount(), 2);
assertEquals(bp1.getSuperbucket(), new BucketId(32, 1234));
assertEquals(bp1.getProgress(), new BucketId());
iter.update(bp1.getSuperbucket(), new BucketId(36, 1234));
assertEquals(progress.getPendingBucketCount(), 2);
assertEquals(bp2.getSuperbucket(), new BucketId(32, 8009));
assertEquals(bp2.getProgress(), new BucketId());
{
// Expected serialized form: header with percent, distribution bits, then
// three counters (cursor/finished/total — cf. the import tests below),
// then one "superbucketHex:progressHex" line per unfinished bucket.
// Note: the still-active 8009 bucket serializes with ":0" progress.
StringBuilder desired = new StringBuilder();
desired.append("VDS bucket progress file (").append(progress.percentFinished()).append("% completed)\n");
desired.append(distBits);
desired.append('\n');
desired.append(0);
desired.append('\n');
desired.append(0);
desired.append('\n');
desired.append(3);
desired.append('\n');
desired.append(Long.toHexString(new BucketId(32, 1234).getRawId()));
desired.append(':');
desired.append(Long.toHexString(new BucketId(36, 1234).getRawId()));
desired.append('\n');
desired.append(Long.toHexString(new BucketId(32, 8009).getRawId()));
desired.append(":0\n");
desired.append(Long.toHexString(new BucketId(32, 6789).getRawId()));
desired.append(":0\n");
assertEquals(desired.toString(), progress.toString());
// Deserialize and confirm iteration resumes at the same point, in the
// same order, with 1234's sub-bucket progress preserved.
ProgressToken prog2 = new ProgressToken(progress.toString());
assertEquals(prog2.getDistributionBitCount(), distBits);
assertEquals(prog2.getTotalBucketCount(), 3);
assertEquals(prog2.getFinishedBucketCount(), 0);
VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, prog2);
assertEquals(prog2.getPendingBucketCount(), 3);
assertFalse(prog2.hasActive());
assertTrue(iter2.hasNext());
assertFalse(iter2.isDone());
assertTrue(iter2.getBucketSource() instanceof VisitorIterator.ExplicitBucketSource);
assertFalse(iter2.getBucketSource().hasNext());
VisitorIterator.BucketProgress bp = iter2.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(32, 1234));
assertEquals(bp.getProgress(), new BucketId(36, 1234));
assertEquals(prog2.getPendingBucketCount(), 2);
assertTrue(iter2.hasNext());
assertFalse(iter2.isDone());
bp = iter2.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(32, 8009));
assertEquals(bp.getProgress(), new BucketId());
assertEquals(prog2.getPendingBucketCount(), 1);
assertTrue(iter2.hasNext());
assertFalse(iter2.isDone());
bp = iter2.getNext();
assertEquals(prog2.getPendingBucketCount(), 0);
assertEquals(bp.getSuperbucket(), new BucketId(32, 6789));
assertEquals(bp.getProgress(), new BucketId());
assertFalse(iter2.hasNext());
assertFalse(iter2.isDone());
assertEquals(prog2.getActiveBucketCount(), 3);
}
// Back on the original iterator: finish the remaining buckets one by one.
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
bp1 = iter.getNext();
assertEquals(bp1.getSuperbucket(), new BucketId(32, 1234));
assertEquals(bp1.getProgress(), new BucketId(36, 1234));
iter.update(bp1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
bp1 = iter.getNext();
assertEquals(bp1.getSuperbucket(), new BucketId(32, 6789));
assertEquals(bp1.getProgress(), new BucketId());
// With 1234 finished, only 6789 (active) and 8009 (active) remain serialized.
assertEquals(
progress.toString(),
"VDS bucket progress file (" + progress.percentFinished() + "% completed)\n" +
"16\n" +
"0\n" +
"1\n" +
"3\n" +
"8000000000001f49:0\n" +
"8000000000001a85:0\n");
iter.update(bp1.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(progress.getPendingBucketCount(), 0);
assertEquals(progress.getActiveBucketCount(), 1);
iter.update(bp2.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertFalse(iter.hasNext());
assertTrue(iter.isDone());
assertTrue(progress.isFinished());
assertEquals(progress.getActiveBucketCount(), 0);
{
// Fully finished: all three counters equal the total bucket count, no
// per-bucket lines remain.
StringBuilder finished = new StringBuilder();
finished.append("VDS bucket progress file (100.0% completed)\n");
finished.append(distBits);
finished.append('\n');
finished.append(0);
finished.append('\n');
finished.append(3);
finished.append('\n');
finished.append(3);
finished.append('\n');
assertEquals(finished.toString(), progress.toString());
}
}
/**
 * Test that doing update() on a bucket several times in a row (without re-fetching
 * from getNext first) works: the last update wins, and getNext hands the bucket
 * back with the most recent sub-bucket progress.
 * Defect fixed: assertEquals was called as (actual, expected); arguments are now
 * (expected, actual) per the JUnit contract so failure messages read correctly.
 */
@Test
public void testActiveUpdate() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group = \"yahoo.com\"", idFactory, 16, progress);
VisitorIterator.BucketProgress bp = iter.getNext();
assertEquals(0, progress.getPendingBucketCount());
assertEquals(1, progress.getActiveBucketCount());
BucketId superbucket = bp.getSuperbucket();
int usedBits = superbucket.getUsedBits();
// First update moves the active bucket back to pending with sub-progress.
iter.update(superbucket, new BucketId(usedBits + 2, superbucket.getId() | (2L << usedBits)));
assertEquals(1, progress.getPendingBucketCount());
assertEquals(0, progress.getActiveBucketCount());
// Second update on the same (now pending) bucket must simply overwrite
// its stored progress without changing the pending/active counts.
iter.update(superbucket, new BucketId(usedBits + 2, superbucket.getId() | (1L << usedBits)));
assertEquals(1, progress.getPendingBucketCount());
assertEquals(0, progress.getActiveBucketCount());
bp = iter.getNext();
assertEquals(superbucket, bp.getSuperbucket());
// The progress handed back is from the LAST update.
assertEquals(new BucketId(usedBits + 2, superbucket.getId() | (1L << usedBits)), bp.getProgress());
assertEquals(0, progress.getPendingBucketCount());
assertEquals(1, progress.getActiveBucketCount());
}
/**
 * Test that ensures doing update(superbucket, NULL_BUCKET) simply puts the
 * bucket back in pending with null progress, and that updating with the
 * superbucket itself as progress re-queues it with that value.
 * Defects fixed: removed the unused local {@code sub}; assertEquals argument
 * order normalized to (expected, actual).
 */
@Test
public void testNullAndSuperUpdate() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group = \"yahoo.com\"", idFactory, 16, progress);
assertEquals(1, progress.getPendingBucketCount());
VisitorIterator.BucketProgress bp = iter.getNext();
assertEquals(new BucketId(), bp.getProgress());
BucketId superbucket = bp.getSuperbucket();
// The single group bucket is now active; nothing else to hand out yet.
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(0, progress.getPendingBucketCount());
assertEquals(1, progress.getActiveBucketCount());
// A NULL_BUCKET update moves the bucket from active back to pending.
iter.update(superbucket, ProgressToken.NULL_BUCKET);
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(1, progress.getPendingBucketCount());
assertEquals(0, progress.getActiveBucketCount());
VisitorIterator.BucketProgress bp2 = iter.getNext();
assertEquals(superbucket, bp2.getSuperbucket());
assertEquals(ProgressToken.NULL_BUCKET, bp2.getProgress());
assertEquals(0, progress.getPendingBucketCount());
assertEquals(1, progress.getActiveBucketCount());
// Updating with the superbucket itself as progress also re-queues it,
// and that progress value is handed back verbatim on the next fetch.
iter.update(superbucket, superbucket);
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(1, progress.getPendingBucketCount());
assertEquals(0, progress.getActiveBucketCount());
bp2 = iter.getNext();
assertEquals(superbucket, bp2.getSuperbucket());
assertEquals(superbucket, bp2.getProgress());
assertEquals(0, progress.getPendingBucketCount());
assertEquals(1, progress.getActiveBucketCount());
}
/**
 * A serialized token whose finished count equals the total bucket count must
 * deserialize directly into the finished state, and survive a further
 * serialize()/deserialize round trip unchanged.
 * Defects fixed: assertEquals calls mixed (actual, expected) and
 * (expected, actual) orders within the same method — now consistently
 * (expected, actual); the repetitive StringBuilder was replaced by a plain
 * concatenation producing the identical string.
 */
@Test
public void testDeserializedFinishedProgress() {
// Format: header, distribution bits, then cursor / finished / total counts
// (all 1 << 17 here, i.e. every bucket finished at 17 distribution bits).
String finished = "VDS bucket progress file\n"
        + 17 + '\n'
        + (1L << 17) + '\n'
        + (1L << 17) + '\n'
        + (1L << 17) + '\n';
ProgressToken token = new ProgressToken(finished);
assertEquals(17, token.getDistributionBitCount());
assertEquals(1L << 17, token.getTotalBucketCount());
assertEquals(1L << 17, token.getFinishedBucketCount());
assertEquals(1L << 17, token.getBucketCursor());
assertTrue(token.isFinished());
// Round trip through serialize() must preserve every field.
ProgressToken token2 = new ProgressToken(token.serialize());
assertEquals(17, token2.getDistributionBitCount());
assertEquals(1L << 17, token2.getTotalBucketCount());
assertEquals(1L << 17, token2.getFinishedBucketCount());
assertEquals(1L << 17, token2.getBucketCursor());
assertTrue(token2.isFinished());
}
/**
 * Verifies ProgressToken.progressFraction for sub-buckets at various depths
 * below a 16-bit superbucket (halves and quarters), including the null bucket,
 * full-depth (32-bit) buckets, and keys with the sign bit set.
 * Defect fixed: the three-argument assertEquals(double, double, delta) was
 * called as (actual, expected, delta); now (expected, actual, delta) per the
 * JUnit contract.
 */
@Test
public void testBucketProgressFraction() {
double epsilon = 0.00001;
BucketId b_0 = new BucketId();
// Naming convention: b_<percent>_<levels below the 16-bit superbucket>.
BucketId b_100_0 = new BucketId(16, 1234);
BucketId b_50_1 = new BucketId(17, 1234);
BucketId b_100_1 = new BucketId(17, 1234 | (1 << 16));
BucketId b_25_2 = new BucketId(18, 1234);
BucketId b_50_2 = new BucketId(18, 1234 | (2 << 16));
BucketId b_75_2 = new BucketId(18, 1234 | (1 << 16));
BucketId b_100_2 = new BucketId(18, 1234 | (3 << 16));
ProgressToken p = new ProgressToken(16);
BucketId sb = new BucketId(16, 1234);
// The null bucket means "not started" — fraction 0 regardless of superbucket.
assertEquals(0.0, p.progressFraction(new BucketId(32, 1234), b_0), epsilon);
assertEquals(1.0, p.progressFraction(sb, b_100_0), epsilon);
assertEquals(0.5, p.progressFraction(sb, b_50_1), epsilon);
assertEquals(1.0, p.progressFraction(sb, b_100_1), epsilon);
assertEquals(0.25, p.progressFraction(sb, b_25_2), epsilon);
assertEquals(0.5, p.progressFraction(sb, b_50_2), epsilon);
assertEquals(0.75, p.progressFraction(sb, b_75_2), epsilon);
assertEquals(1.0, p.progressFraction(sb, b_100_2), epsilon);
// Raw bucket ids with the top bit set must not break the computation.
assertEquals(1.0, p.progressFraction(new BucketId(0x8000000000000000L),
new BucketId(0xb0000fff00000000L)), epsilon);
}
/**
 * Checks percentFinished() with 4 distribution bits (16 buckets): each fully
 * finished bucket contributes 6.25%, and partially visited buckets contribute
 * fractionally according to their sub-bucket progress.
 */
@Test
public void testProgressEstimation() throws ParseException {
int distBits = 4;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken progress = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, distBits, progress);
assertEquals(progress.getDistributionBitCount(), 4);
double epsilon = 0.00001;
assertEquals(progress.percentFinished(), 0, epsilon);
VisitorIterator.BucketProgress bp = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
// One of 16 buckets done = 6.25%.
assertEquals(progress.percentFinished(), 6.25, epsilon);
assertEquals(progress.getFinishedBucketCount(), 1);
bp = iter.getNext();
VisitorIterator.BucketProgress bp3 = iter.getNext();
VisitorIterator.BucketProgress bp4 = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertEquals(progress.percentFinished(), 12.5, epsilon);
assertEquals(progress.getFinishedBucketCount(), 2);
// Partial sub-bucket progress contributes a fraction of a bucket's 6.25%,
// but does not increment the finished bucket count.
iter.update(bp3.getSuperbucket(), new BucketId(distBits + 2, bp3.getSuperbucket().getId() | (1 << distBits)));
assertEquals(progress.percentFinished(), 17.1875, epsilon);
assertEquals(progress.getFinishedBucketCount(), 2);
iter.update(bp4.getSuperbucket(), new BucketId(distBits + 2, bp4.getSuperbucket().getId()));
assertEquals(progress.percentFinished(), 18.75, epsilon);
assertEquals(progress.getFinishedBucketCount(), 2);
iter.update(bp4.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
iter.update(bp3.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertEquals(progress.percentFinished(), 25, epsilon);
assertEquals(progress.getFinishedBucketCount(), 4);
// Finish the remaining buckets; estimate must converge to exactly 100%.
while (iter.hasNext()) {
bp = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
assertEquals(progress.getFinishedBucketCount(), 16);
assertEquals(progress.percentFinished(), 100, epsilon);
}
/**
 * BucketKeyWrapper must order bucket keys as unsigned 64-bit values: keys with
 * the sign bit set compare greater than keys without, and equal keys compare
 * to zero.
 * Defect fixed: equality was asserted as assertTrue(x.compareTo(y) == 0),
 * which reports no values on failure; assertEquals(0, ...) is used instead.
 */
@Test
public void testBucketKeyWrapperOrdering() {
ProgressToken.BucketKeyWrapper bk1 = new ProgressToken.BucketKeyWrapper(0x0000000000000001L);
ProgressToken.BucketKeyWrapper bk2 = new ProgressToken.BucketKeyWrapper(0x7FFFFFFFFFFFFFFFL);
ProgressToken.BucketKeyWrapper bk3 = new ProgressToken.BucketKeyWrapper(0x8000000000000000L);
ProgressToken.BucketKeyWrapper bk4 = new ProgressToken.BucketKeyWrapper(0xFFFFFFFFFFFFFFFFL);
// Strictly increasing in unsigned order (bk3/bk4 are negative as signed longs)...
assertTrue(bk1.compareTo(bk2) < 0);
assertTrue(bk2.compareTo(bk3) < 0);
assertTrue(bk3.compareTo(bk4) < 0);
// ...and anti-symmetric in the reverse direction.
assertTrue(bk2.compareTo(bk1) > 0);
assertTrue(bk3.compareTo(bk2) > 0);
assertTrue(bk4.compareTo(bk3) > 0);
// Distinct wrapper instances with equal keys compare as equal.
ProgressToken.BucketKeyWrapper bk5 = new ProgressToken.BucketKeyWrapper(0x7FFFFFFFFFFFFFFFL);
ProgressToken.BucketKeyWrapper bk6 = new ProgressToken.BucketKeyWrapper(0x8000000000000000L);
assertEquals(0, bk5.compareTo(bk2));
assertEquals(0, bk6.compareTo(bk3));
}
/**
 * Helper: for a distribution bit count {@code db}, checks that
 * ProgressToken.makeNthBucketKey(i, db) generates exactly the i'th key of the
 * sorted set of all 2^db bucket keys.
 * Defect fixed: a mismatch previously only flipped a boolean that was asserted
 * after the loop, hiding which index and key diverged; each key is now
 * asserted directly so a failure reports the offending values.
 *
 * @param db distribution bit count to exercise (bucket space size 2^db)
 */
private void doTestBucketKeyGeneration(int db) {
// Build the reference: every bucket id at this bit count, mapped to its
// key and sorted via BucketKeyWrapper's (unsigned) ordering.
ProgressToken.BucketKeyWrapper[] keys = new ProgressToken.BucketKeyWrapper[1 << db];
for (int i = 0; i < (1 << db); ++i) {
keys[i] = new ProgressToken.BucketKeyWrapper(
ProgressToken.bucketToKey(new BucketId(db, i).getId()));
}
Arrays.sort(keys);
for (int i = 0; i < (1 << db); ++i) {
// The generated key must match the known sorted key at the same ordinal.
assertEquals(keys[i].getKey(), ProgressToken.makeNthBucketKey(i, db));
}
}
/**
 * Runs the bucket key generation consistency check for every distribution
 * bit count from 1 through 13, inclusive.
 */
@Test
public void testBucketKeyGeneration() {
for (int distributionBits = 1; distributionBits <= 13; ++distributionBits) {
doTestBucketKeyGeneration(distributionBits);
}
}
/**
 * Splitting a pending bucket must produce its two children (low half first),
 * each inheriting the parent's stored sub-bucket progress, and iteration must
 * hand them out before moving on to the next superbucket.
 */
@Test
public void testSingleBucketSplits() throws ParseException {
int db = 2;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
// Split a pending bucket that has null (not-started) progress.
VisitorIterator.BucketProgress bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db, 0));
iter.update(bp.getSuperbucket(), new BucketId());
assertEquals(p.getPendingBucketCount(), 1);
p.splitPendingBucket(new BucketId(db, 0));
assertEquals(p.getPendingBucketCount(), 2);
// Both children are handed out with null progress.
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 0));
assertEquals(bp.getProgress(), new BucketId(0));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 4));
assertEquals(bp.getProgress(), new BucketId(0));
// Split a pending bucket that already has sub-bucket progress (0x12):
// both children inherit that progress.
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db, 2));
iter.update(bp.getSuperbucket(), new BucketId(db + 3, 0x12));
assertEquals(p.getPendingBucketCount(), 1);
p.splitPendingBucket(new BucketId(db, 2));
assertEquals(p.getPendingBucketCount(), 2);
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 2));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x12));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 6));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x12));
// Same again for the last superbucket, with different progress (0x15).
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db, 1));
iter.update(bp.getSuperbucket(), new BucketId(db + 3, 0x15));
assertEquals(p.getPendingBucketCount(), 1);
p.splitPendingBucket(new BucketId(db, 1));
assertEquals(p.getPendingBucketCount(), 2);
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 1));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x15));
bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(db + 1, 5));
assertEquals(bp.getProgress(), new BucketId(db + 3, 0x15));
}
/**
 * Test increasing the distribution bits for a full bucket space range
 * source with no finished, active or pending buckets: the bucket space
 * doubles and iteration covers all 8 buckets in reversed-key order.
 * Defects fixed: assertEquals argument order normalized to
 * (expected, actual); the manual index loop over {@code desired} replaced
 * by an enhanced for loop.
 * @throws ParseException upon docsel parse failure (shouldn't happen)
 */
@Test
public void testRangeDistributionBitIncrease1NoPending() throws ParseException {
int db = 2;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
assertEquals(4, p.getTotalBucketCount());
// Raising the distribution bit count by one doubles the bucket space,
// and the new count must propagate to token, iterator and source alike.
iter.setDistributionBitCount(db + 1);
assertEquals(8, p.getTotalBucketCount());
assertEquals(db + 1, p.getDistributionBitCount());
assertEquals(db + 1, iter.getDistributionBitCount());
assertEquals(db + 1, iter.getBucketSource().getDistributionBitCount());
// Expected visiting order for the 3-bit bucket space.
int[] desired = new int[] { 0, 4, 2, 6, 1, 5, 3, 7 };
for (int expectedBucket : desired) {
assertEquals(new BucketId(db + 1, expectedBucket), iter.getNext().getSuperbucket());
}
}
/**
 * Increasing the distribution bit count by one while buckets exist in all
 * states (finished, pending, unvisited): finished and pending counts double,
 * and the doubled pending buckets are handed out before the source resumes
 * generating fresh buckets.
 */
@Test
public void testRangeDistributionBitIncrease1AllBucketStates() throws ParseException {
int db = 3;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
// One finished bucket, three pending (with null progress).
VisitorIterator.BucketProgress bp = iter.getNext();
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[3];
bpp[0] = iter.getNext();
bpp[1] = iter.getNext();
bpp[2] = iter.getNext();
iter.update(bpp[0].getSuperbucket(), new BucketId());
iter.update(bpp[1].getSuperbucket(), new BucketId());
iter.update(bpp[2].getSuperbucket(), new BucketId());
assertEquals(p.getFinishedBucketCount(), 1);
assertEquals(p.getPendingBucketCount(), 3);
assertEquals(p.getActiveBucketCount(), 0);
// db -> db+1: every tracked bucket splits in two, so counts double.
iter.setDistributionBitCount(db + 1);
assertEquals(p.getTotalBucketCount(), 16);
assertEquals(p.getFinishedBucketCount(), 2);
assertEquals(p.getPendingBucketCount(), 6);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getDistributionBitCount(), db + 1);
assertEquals(iter.getDistributionBitCount(), db + 1);
assertEquals(iter.getBucketSource().getDistributionBitCount(), db + 1);
// The six split pending buckets come out first (child pairs of the
// original three pending superbuckets)...
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x04));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0C));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x02));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0A));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x06));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0E));
assertEquals(p.getPendingBucketCount(), 0);
// ...then the source continues with previously unvisited buckets.
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x01));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x09));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x05));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x0D));
assertEquals(iter.getNext().getSuperbucket(), new BucketId(db + 1, 0x03));
}
/**
 * Increasing the distribution bit count by several bits (16 -> 20) while a
 * bucket is still active: the token keeps the old bit count and the iterator
 * yields until the last active bucket is updated, after which all state is
 * rescaled by 2^4 = 16 in one go.
 */
@Test
public void testRangeDistributionIncreaseMultipleBits() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
// Three finished, two pending, one left active.
for (int i = 0; i < 3; ++i) {
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[2];
bpp[0] = iter.getNext();
bpp[1] = iter.getNext();
VisitorIterator.BucketProgress bpa = iter.getNext();
iter.update(bpp[0].getSuperbucket(), new BucketId());
iter.update(bpp[1].getSuperbucket(), new BucketId());
iter.setDistributionBitCount(20);
// While a bucket is still active the token is NOT yet converted; the
// iterator reports the new count but must yield (no next) until then.
assertEquals(p.getDistributionBitCount(), 16);
assertEquals(iter.getDistributionBitCount(), 20);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 20);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertTrue(iter.getBucketSource().shouldYield());
assertEquals(p.getPendingBucketCount(), 2);
assertEquals(p.getActiveBucketCount(), 1);
// Finishing the last active bucket triggers the rescale: each old bucket
// becomes 16 new ones (counts, cursor and finished all scale by 16).
iter.update(bpa.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertEquals(p.getDistributionBitCount(), 20);
assertEquals(p.getPendingBucketCount(), 32);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getBucketCursor(), 96);
assertEquals(p.getFinishedBucketCount(), 16 * 4);
// The 32 split pending buckets come out in key order starting at ordinal 48.
for (int i = 0; i < 32; ++i) {
long testKey = ProgressToken.makeNthBucketKey(i + 48, 20);
VisitorIterator.BucketProgress bp = iter.getNext();
assertEquals(bp.getSuperbucket(), new BucketId(ProgressToken.keyToBucketId(testKey)));
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 16 * 6);
// Source resumes generating fresh buckets from the new cursor position.
assertEquals(iter.getNext().getSuperbucket(), new BucketId(20, 0x6000));
}
/**
 * Splitting a pending bucket one level down and then merging one of the two
 * halves back must restore a single pending entry for the original
 * superbucket.
 */
@Test
public void testSingleBucketMerge() throws ParseException {
int distributionBits = 2;
BucketIdFactory bucketIdFactory = new BucketIdFactory();
ProgressToken token = new ProgressToken();
VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", bucketIdFactory, distributionBits, token);
// Make superbucket 0 pending with null progress, then split it in two.
VisitorIterator.BucketProgress entry = iterator.getNext();
iterator.update(entry.getSuperbucket(), new BucketId());
token.splitPendingBucket(new BucketId(distributionBits, 0));
assertEquals(token.getPendingBucketCount(), 2);
// Merging collapses the sibling pair back into the parent bucket.
token.mergePendingBucket(new BucketId(distributionBits + 1, 0));
assertEquals(token.getPendingBucketCount(), 1);
entry = iterator.getNext();
assertEquals(entry.getSuperbucket(), new BucketId(distributionBits, 0));
}
/**
 * Decreasing the distribution bit count by one (16 -> 15) with buckets in all
 * states: the conversion waits for the active bucket, then pending sibling
 * pairs merge, finished count halves (rounded down) and the cursor halves.
 */
@Test
public void testRangeDistributionBitDecrease1() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
// Lossless reset is only possible before any bucket has been handed out.
assertTrue(src.isLosslessResetPossible());
for (int i = 0; i < 3; ++i) {
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
assertFalse(src.isLosslessResetPossible());
// Six pending buckets and one active; cursor has advanced past 10 buckets.
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[6];
for (int i = 0; i < 6; ++i) {
bpp[i] = iter.getNext();
}
VisitorIterator.BucketProgress bpa = iter.getNext();
for (int i = 0; i < 6; ++i) {
iter.update(bpp[i].getSuperbucket(), new BucketId());
}
assertEquals(p.getBucketCursor(), 10);
iter.setDistributionBitCount(db - 1);
// Token is not converted while a bucket remains active; source must yield.
assertEquals(iter.getDistributionBitCount(), db - 1);
assertEquals(p.getDistributionBitCount(), db);
assertEquals(iter.getBucketSource().getDistributionBitCount(), db - 1);
assertTrue(iter.getBucketSource().shouldYield());
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getActiveBucketCount(), 1);
// Returning the last active bucket triggers the merge-down: 3 finished ->
// 1, pending pairs collapse to 4, cursor 10 -> 5.
iter.update(bpa.getSuperbucket(), new BucketId());
assertEquals(p.getDistributionBitCount(), db - 1);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getPendingBucketCount(), 4);
assertEquals(p.getFinishedBucketCount(), 1);
assertEquals(p.getBucketCursor(), 5);
}
/**
 * Increasing the distribution bit count (16 -> 20) and then decreasing it back
 * (20 -> 16) with no active buckets must be symmetric: pending count and
 * bucket cursor scale up by 2^4 and back down again without yielding.
 */
@Test
public void testRangeDistributionBitIncreaseDecrease() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
assertTrue(src.isLosslessResetPossible());
// One finished bucket, four pending; no buckets left active.
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[4];
for (int i = 0; i < 4; ++i) {
bpp[i] = iter.getNext();
}
for (int i = 0; i < 4; ++i) {
iter.update(bpp[i].getSuperbucket(), new BucketId());
}
assertFalse(src.isLosslessResetPossible());
// With no active buckets the conversion happens immediately (no yield);
// 16 -> 20 bits scales pending count and cursor by 2^4.
iter.setDistributionBitCount(20);
assertEquals(p.getDistributionBitCount(), 20);
assertEquals(p.getPendingBucketCount(), 4 << 4);
assertFalse(iter.getBucketSource().shouldYield());
assertEquals(p.getBucketCursor(), 5 << 4);
// Going back down restores the original counts exactly.
iter.setDistributionBitCount(16);
assertEquals(p.getDistributionBitCount(), 16);
assertEquals(p.getPendingBucketCount(), 4);
assertFalse(iter.getBucketSource().shouldYield());
assertEquals(p.getBucketCursor(), 5);
}
/**
 * Changing the distribution bit count multiple times while buckets are still
 * active: each change keeps the iterator yielding until all active buckets are
 * returned, only the LAST requested bit count takes effect, and a token
 * serialized mid-conversion deserializes with its original (pre-change) bit
 * count intact.
 */
@Test
public void testRangeDistributionBitChangeWithoutDone() throws ParseException {
int db = 11;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
// Four buckets fetched; only two returned, leaving two active.
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[4];
for (int i = 0; i < 4; ++i) {
bpp[i] = iter.getNext();
}
for (int i = 0; i < 2; ++i) {
iter.update(bpp[i].getSuperbucket(), new BucketId());
}
assertFalse(src.isLosslessResetPossible());
// Request a decrease while 2 buckets are active: token stays at 11 bits
// and the source yields.
iter.setDistributionBitCount(9);
assertEquals(p.getDistributionBitCount(), 11);
assertEquals(p.getActiveBucketCount(), 2);
assertEquals(p.getPendingBucketCount(), 2);
assertTrue(iter.getBucketSource().shouldYield());
// Return one active bucket, then request an increase instead; still one
// bucket active, so conversion is still deferred.
iter.update(bpp[3].getSuperbucket(), new BucketId(15, bpp[3].getSuperbucket().getId()));
iter.setDistributionBitCount(12);
assertEquals(p.getActiveBucketCount(), 1);
assertEquals(p.getPendingBucketCount(), 3);
assertTrue(iter.getBucketSource().shouldYield());
// Snapshot the token while the conversion is still pending.
String serialized = p.toString();
// Returning the last active bucket applies the latest target (12 bits):
// the 3 pending buckets each split in two.
iter.update(bpp[2].getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getPendingBucketCount(), 3 * 2);
assertFalse(iter.getBucketSource().shouldYield());
assertEquals(p.getFinishedBucketCount(), 2);
// The mid-conversion snapshot must come back at the ORIGINAL 11 bits.
ProgressToken p2 = new ProgressToken(serialized);
assertEquals(p2.getDistributionBitCount(), 11);
BucketIdFactory idFactory2 = new BucketIdFactory();
VisitorIterator iter2 = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory2, 1, p2);
assertEquals(iter2.getDistributionBitCount(), 11);
assertEquals(p2.getDistributionBitCount(), 11);
iter2.setDistributionBitCount(12);
assertEquals(p2.getDistributionBitCount(), 12);
assertEquals(p2.getPendingBucketCount(), 8);
assertEquals(p2.getBucketCursor(), 8);
assertEquals(p2.getFinishedBucketCount(), 0);
}
/**
 * A large distribution bit drop (31 -> 11) early in iteration: once the last
 * active bucket is returned, all progress is discarded (reset to zero) and
 * iteration restarts from the first bucket at the new bit count.
 */
@Test
public void testRangeDistributionBitInitialDrop() throws ParseException {
int db = 31;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.BucketProgress[] bp = new VisitorIterator.BucketProgress[3];
bp[0] = iter.getNext();
bp[1] = iter.getNext();
bp[2] = iter.getNext();
// Return two of the three; bp[0]'s bucket remains active.
iter.update(bp[2].getSuperbucket(), new BucketId());
iter.update(bp[1].getSuperbucket(), new BucketId());
assertEquals(p.getActiveBucketCount(), 1);
iter.setDistributionBitCount(11);
// Conversion is deferred while a bucket is active.
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getActiveBucketCount(), 1);
// Returning the last active bucket resets ALL progress to zero.
iter.update(new BucketId(31, 0), new BucketId());
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getBucketCursor(), 0);
bp[0] = iter.getNext();
assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
}
/**
 * Lossless reset on a big distribution bit increase (1 -> 11): when no real
 * progress has been made, the token is reset to a clean state at the new bit
 * count rather than being split — both on a live iterator and for a token
 * serialized before the change and re-imported.
 */
@Test
public void testRangeDistributionLosslessReset() throws ParseException {
int db = 1;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.DistributionRangeBucketSource src
= (VisitorIterator.DistributionRangeBucketSource)iter.getBucketSource();
VisitorIterator.BucketProgress[] bp = new VisitorIterator.BucketProgress[2];
bp[0] = iter.getNext();
bp[1] = iter.getNext();
// Snapshot while both buckets are active (no progress made yet).
String serialized = p.toString();
assertFalse(src.isLosslessResetPossible());
iter.update(bp[1].getSuperbucket(), new BucketId());
assertEquals(p.getActiveBucketCount(), 1);
iter.setDistributionBitCount(11);
// Still one active bucket, so the reset is deferred and not yet lossless.
assertFalse(src.isLosslessResetPossible());
assertEquals(p.getDistributionBitCount(), 1);
assertFalse(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getActiveBucketCount(), 1);
// Once the last active bucket returns, the token resets cleanly to 11 bits.
iter.update(new BucketId(1, 0), new BucketId());
assertTrue(iter.hasNext());
assertFalse(iter.isDone());
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getBucketCursor(), 0);
assertEquals(p.getDistributionBitCount(), 11);
bp[0] = iter.getNext();
assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
// Re-import the pre-change snapshot: setting 11 bits must likewise
// produce a clean reset rather than carrying over stale state.
p = new ProgressToken(serialized);
idFactory = new BucketIdFactory();
iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 1, p);
iter.setDistributionBitCount(11);
assertEquals(p.getPendingBucketCount(), 0);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getBucketCursor(), 0);
assertEquals(p.getDistributionBitCount(), 11);
bp[0] = iter.getNext();
assertEquals(bp[0].getSuperbucket(), new BucketId(11, 0));
}
/**
 * Increasing the distribution bit count (12 -> 16) for an explicit bucket
 * source must not change its fixed bucket set: finished/pending/total counts
 * stay as before, only the bit count propagates.
 * Defect fixed: assertEquals was called as (actual, expected); now
 * (expected, actual) for readable failure messages.
 */
@Test
public void testExplicitDistributionBitIncrease() throws ParseException {
int distBits = 12;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, p);
assertEquals(distBits, iter.getDistributionBitCount());
assertEquals(distBits, p.getDistributionBitCount());
assertEquals(distBits, iter.getBucketSource().getDistributionBitCount());
// Finish one of the three user buckets before changing the bit count.
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
iter.setDistributionBitCount(16);
assertEquals(16, iter.getDistributionBitCount());
assertEquals(16, p.getDistributionBitCount());
assertEquals(16, iter.getBucketSource().getDistributionBitCount());
// The explicit source keeps exactly its three fixed buckets.
assertEquals(2, p.getPendingBucketCount());
assertEquals(1, p.getFinishedBucketCount());
assertEquals(3, p.getTotalBucketCount());
}
/**
 * Decreasing the distribution bit count (20 -> 16) for an explicit bucket
 * source must not change its fixed bucket set: finished/pending/total counts
 * stay as before, only the bit count propagates.
 * Defect fixed: assertEquals was called as (actual, expected); now
 * (expected, actual) for readable failure messages.
 */
@Test
public void testExplicitDistributionBitDecrease() throws ParseException {
int distBits = 20;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.user == 1234 or id.user == 6789 or id.user == 8009", idFactory, distBits, p);
assertEquals(distBits, iter.getDistributionBitCount());
assertEquals(distBits, p.getDistributionBitCount());
assertEquals(distBits, iter.getBucketSource().getDistributionBitCount());
// Finish one of the three user buckets before changing the bit count.
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
iter.setDistributionBitCount(16);
assertEquals(16, iter.getDistributionBitCount());
assertEquals(16, p.getDistributionBitCount());
assertEquals(16, iter.getBucketSource().getDistributionBitCount());
// The explicit source keeps exactly its three fixed buckets.
assertEquals(2, p.getPendingBucketCount());
assertEquals(1, p.getFinishedBucketCount());
assertEquals(3, p.getTotalBucketCount());
}
/**
 * Serializing an explicit-source token at 20 distribution bits and importing
 * it into an iterator created with a lower bit count (1) must keep the 20-bit
 * state intact rather than truncating it.
 */
@Test
public void testExplicitDistributionImportNoTruncation() throws ParseException {
String selection = "id.user == 1234 or id.user == 6789 or id.user == 8009";
BucketIdFactory factory = new BucketIdFactory();
ProgressToken token = new ProgressToken();
VisitorIterator iterator = VisitorIterator.createFromDocumentSelection(
selection, factory, 20, token);
assertEquals(20, iterator.getDistributionBitCount());
assertEquals(20, token.getDistributionBitCount());
assertEquals(20, iterator.getBucketSource().getDistributionBitCount());
// Finish one bucket so the serialized state is non-trivial.
iterator.update(iterator.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
ProgressToken imported = new ProgressToken(token.toString());
BucketIdFactory importFactory = new BucketIdFactory();
// Creating the new iterator with only 1 distribution bit must not
// truncate the 20-bit progress carried by the imported token.
VisitorIterator importedIterator = VisitorIterator.createFromDocumentSelection(
selection, importFactory, 1, imported);
assertEquals(20, importedIterator.getDistributionBitCount());
assertEquals(20, imported.getDistributionBitCount());
assertEquals(20, importedIterator.getBucketSource().getDistributionBitCount());
assertEquals(2, imported.getPendingBucketCount());
assertEquals(1, imported.getFinishedBucketCount());
assertEquals(3, imported.getTotalBucketCount());
}
/**
 * Importing serialized progress written at 10 distribution bits into an
 * iterator raised to 12 bits must scale all counters (finished, pending,
 * cursor) by the bucket-count factor 2^2 = 4, and the rescaled token must
 * survive a serialization round trip.
 * Defects fixed: removed the unused local {@code int db = 12}; assertEquals
 * argument order normalized to (expected, actual).
 */
@Test
public void testImportProgressWithOutdatedDistribution() throws ParseException {
// Format: header, distribution bits, bucket cursor, finished count,
// total count, then one "superbucketHex:progressHex" line per pending bucket.
String input = "VDS bucket progress file\n" +
"10\n" +
"503\n" +
"500\n" +
"1024\n" +
"28000000000000be:0\n" +
"28000000000002be:0\n" +
"28000000000001be:0\n";
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken(input);
assertEquals(10, p.getDistributionBitCount());
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 1, p);
iter.setDistributionBitCount(12);
assertEquals(12, iter.getDistributionBitCount());
assertEquals(12, p.getDistributionBitCount());
assertEquals(12, iter.getBucketSource().getDistributionBitCount());
// 10 -> 12 bits multiplies every bucket count and the cursor by 4.
assertEquals(1 << 12, p.getTotalBucketCount());
assertEquals(500 << 2, p.getFinishedBucketCount());
assertEquals(3 << 2, p.getPendingBucketCount());
assertEquals(0, p.getActiveBucketCount());
assertEquals(503 << 2, p.getBucketCursor());
assertTrue(iter.hasNext());
// The rescaled state must be stable across a serialize/deserialize cycle.
ProgressToken p2 = new ProgressToken(p.serialize());
assertEquals(12, p2.getDistributionBitCount());
assertEquals(1 << 12, p2.getTotalBucketCount());
assertEquals(500 << 2, p2.getFinishedBucketCount());
assertEquals(3 << 2, p2.getPendingBucketCount());
assertEquals(0, p2.getActiveBucketCount());
assertEquals(503 << 2, p2.getBucketCursor());
}
@Test
public void testImportInconsistentProgressIncrease() throws ParseException {
String input = "VDS bucket progress file\n" +
"7\n" +
"32\n" +
"24\n" +
"128\n" +
"100000000000000c:0\n";
int db = 8;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken(input);
assertEquals(7, p.getDistributionBitCount());
assertEquals(p.getTotalBucketCount(), 1 << 7);
assertEquals(p.getFinishedBucketCount(), 24);
assertEquals(p.getPendingBucketCount(), 1);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(32, p.getBucketCursor());
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 1, p);
assertEquals(7, p.getDistributionBitCount());
assertEquals(p.getPendingBucketCount(), 1 << 3);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(24 + (1 << 3), p.getBucketCursor());
iter.setDistributionBitCount(8);
assertEquals(iter.getDistributionBitCount(), 8);
assertEquals(p.getDistributionBitCount(), 8);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 8);
assertEquals(p.getTotalBucketCount(), 1 << 8);
assertEquals(p.getFinishedBucketCount(), 24 << 1);
assertEquals(p.getPendingBucketCount(), 1 << 4);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getBucketCursor(), 24*2 + (1 << 4));
assertTrue(iter.hasNext());
}
@Test
public void testImportInconsistentProgressDecrease() throws ParseException {
String input = "VDS bucket progress file\n" +
"7\n" +
"32\n" +
"24\n" +
"128\n" +
"100000000000000c:0\n";
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken(input);
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 1, p);
assertEquals(iter.getDistributionBitCount(), 7);
iter.setDistributionBitCount(6);
assertEquals(iter.getDistributionBitCount(), 6);
assertEquals(p.getDistributionBitCount(), 6);
assertEquals(iter.getBucketSource().getDistributionBitCount(), 6);
assertEquals(p.getTotalBucketCount(), 1 << 6);
assertEquals(p.getFinishedBucketCount(), 24 >> 1);
assertEquals(p.getPendingBucketCount(), 1 << 2);
assertEquals(p.getActiveBucketCount(), 0);
assertEquals(p.getBucketCursor(), 24/2 + (1 << 2));
assertTrue(iter.hasNext());
}
@Test
public void testEntireBucketSpaceCovered() throws ParseException {
int db = 4;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[3];
for (int i = 0; i < 3; ++i) {
bpp[i] = iter.getNext();
}
for (int i = 0; i < 3; ++i) {
iter.update(bpp[i].getSuperbucket(),
new BucketId(db + 1, bpp[i].getSuperbucket().getId()));
}
Set<BucketId> buckets = new TreeSet<BucketId>();
db = 7;
for (int i = 0; i < (1 << db); ++i) {
buckets.add(new BucketId(db, i));
}
iter.setDistributionBitCount(db);
assertEquals(p.getFinishedBucketCount(), 0);
assertEquals(p.getPendingBucketCount(), 3 << 3);
while (iter.hasNext()) {
VisitorIterator.BucketProgress bp = iter.getNext();
assertTrue(buckets.contains(bp.getSuperbucket()));
buckets.remove(bp.getSuperbucket());
}
assertTrue(buckets.isEmpty());
}
@Test
public void testExceptionOnWrongDocumentSelection() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
boolean caughtIt = false;
try {
ProgressToken p = new ProgressToken("VDS bucket progress file\n16\n3\n1\n3\n"
+ "8000000000001f49:0\n8000000000001a85:0\n");
VisitorIterator.createFromDocumentSelection("id.group != \"yahoo.com\"", idFactory, 16, p);
}
catch (IllegalArgumentException e) {
caughtIt = true;
}
assertTrue(caughtIt);
caughtIt = false;
try {
ProgressToken p = new ProgressToken("VDS bucket progress file\n" +
"10\n" +
"503\n" +
"500\n" +
"1024\n" +
"28000000000000be:0\n" +
"28000000000002be:0\n" +
"28000000000001be:0\n");
VisitorIterator.createFromDocumentSelection("id.group=\"yahoo.com\" or id.user=555", idFactory, 16, p);
}
catch (IllegalArgumentException e) {
caughtIt = true;
}
assertTrue(caughtIt);
}
@Test
public void testIsBucketFinished() throws ParseException {
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, 4, p);
assertFalse(p.isBucketFinished(new BucketId(32, 0)));
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertTrue(p.isBucketFinished(new BucketId(32, 0)));
assertFalse(p.isBucketFinished(new BucketId(32, 1 << 3)));
VisitorIterator.BucketProgress bp = iter.getNext();
assertFalse(p.isBucketFinished(new BucketId(32, 1 << 3)));
iter.update(bp.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertTrue(p.isBucketFinished(new BucketId(32, 1 << 3)));
assertTrue(p.isBucketFinished(new BucketId(32, 0x12345670)));
assertTrue(p.isBucketFinished(new BucketId(32, 0x12345678)));
assertFalse(p.isBucketFinished(new BucketId(32, 0x12345671)));
assertFalse(p.isBucketFinished(new BucketId(32, 0x12345679)));
}
@Test
public void testInconsistentState() throws ParseException {
int db = 16;
BucketIdFactory idFactory = new BucketIdFactory();
ProgressToken p = new ProgressToken();
VisitorIterator iter = VisitorIterator.createFromDocumentSelection(
"id.group != \"yahoo.com\"", idFactory, db, p);
for (int i = 0; i < 3; ++i) {
iter.update(iter.getNext().getSuperbucket(), ProgressToken.FINISHED_BUCKET);
}
VisitorIterator.BucketProgress[] bpp = new VisitorIterator.BucketProgress[2];
bpp[0] = iter.getNext();
bpp[1] = iter.getNext();
VisitorIterator.BucketProgress bpa = iter.getNext();
iter.update(bpp[0].getSuperbucket(), new BucketId());
iter.update(bpp[1].getSuperbucket(), new BucketId());
assertFalse(p.isInconsistentState());
iter.setDistributionBitCount(20);
assertTrue(p.isInconsistentState());
iter.update(bpa.getSuperbucket(), ProgressToken.FINISHED_BUCKET);
assertFalse(p.isInconsistentState());
}
@Test
public void testMalformedProgressFile() {
boolean caughtIt = false;
try {
new ProgressToken("VDS bucket progress file\n" +
"10\n" +
"503\n" +
"500\n" +
"1024\n" +
"28000000000000be:0\n" +
"28000000000002be:");
} catch (IllegalArgumentException e) {
caughtIt = true;
}
assertTrue(caughtIt);
}
@Test
public void testFailOnTooFewLinesInFile() {
boolean caughtIt = false;
try {
new ProgressToken("VDS bucket progress file\n" +
"10\n" +
"503\n");
} catch (IllegalArgumentException e) {
caughtIt = true;
}
assertTrue(caughtIt);
}
@Test
public void testUnknownFirstHeaderLine() {
boolean caughtIt = false;
try {
new ProgressToken("Smurf Time 3000\n" +
"10\n" +
"503\n" +
"500\n" +
"1024\n" +
"28000000000000be:0\n" +
"28000000000002be:0");
} catch (IllegalArgumentException e) {
caughtIt = true;
}
assertTrue(caughtIt);
}
@Test
public void testBinaryProgressSerialization() {
String input = "VDS bucket progress file (48.828125% completed)\n" +
"10\n" +
"503\n" +
"500\n" +
"1024\n" +
"28000000000000be:0\n" +
"28000000000002be:0\n" +
"28000000000001be:0\n";
ProgressToken p = new ProgressToken(input);
byte[] buf = p.serialize();
ProgressToken p2 = new ProgressToken(buf);
assertEquals(input, p2.toString());
}
} | |
No, my bad. If the intention is anyway to just end up with the map, then no scope is needed. I was thinking we needed to reuse the code that filters on scope, but there is no need for that. | static String toEndpointsConfig(TestConfig config) throws IOException {
Cursor root = new Slime().setObject();
Cursor endpointsArray = root.setArray("endpoints");
config.deployments().get(config.zone()).forEach((cluster, url) -> {
Cursor endpointObject = endpointsArray.addObject();
endpointObject.setString("cluster", cluster);
endpointObject.setString("url", url.toString());
});
return new String(SlimeUtils.toJsonBytes(root), UTF_8);
} | config.deployments().get(config.zone()).forEach((cluster, url) -> { | static String toEndpointsConfig(TestConfig config) throws IOException {
Cursor root = new Slime().setObject();
Cursor endpointsArray = root.setArray("endpoints");
config.deployments().get(config.zone()).forEach((cluster, url) -> {
Cursor endpointObject = endpointsArray.addObject();
endpointObject.setString("cluster", cluster);
endpointObject.setString("url", url.toString());
});
return new String(SlimeUtils.toJsonBytes(root), UTF_8);
} | class VespaCliTestRunner implements TestRunner {
private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName());
private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>();
private final Path artifactsPath;
private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED);
@Inject
public VespaCliTestRunner(VespaCliTestRunnerConfig config) {
this(config.artifactsPath().resolve("artifacts"));
}
VespaCliTestRunner(Path artifactsPath) {
this.artifactsPath = artifactsPath;
}
@Override
public Collection<LogRecord> getLog(long after) {
return log.tailMap(after + 1).values();
}
@Override
public Status getStatus() {
return status.get();
}
@Override
public CompletableFuture<?> test(Suite suite, byte[] config) {
if (status.getAndSet(RUNNING) == RUNNING)
throw new IllegalStateException("Tests already running, not supposed to be started now");
return CompletableFuture.runAsync(() -> runTests(suite, config));
}
@Override
public boolean isSupported() {
return getChildDirectory(artifactsPath, "tests").isPresent();
}
void runTests(Suite suite, byte[] config) {
Process process = null;
try {
TestConfig testConfig = TestConfig.fromJson(config);
process = testRunProcessBuilder(suite, testConfig).start();
BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream()));
in.lines().forEach(line -> {
if (line.length() > 1 << 13)
line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)";
log(Level.INFO, line, null);
});
status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? FAILURE : ERROR);
}
catch (Exception e) {
if (process != null)
process.destroyForcibly();
log(Level.SEVERE, "Failed running tests", e);
status.set(ERROR);
}
}
ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException {
Path suitePath = getChildDirectory(artifactsPath, "tests")
.flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite)))
.orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'"));
ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(),
"--application", config.application().toFullString(),
"--endpoints", toEndpointsConfig(config),
"--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(),
"--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString());
builder.redirectErrorStream(true);
return builder;
}
private static String toSuiteDirectoryName(Suite suite) {
switch (suite) {
case SYSTEM_TEST: return "system-test";
case STAGING_SETUP_TEST: return "staging-setup";
case STAGING_TEST: return "staging-test";
default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'");
}
}
private void log(Level level, String message, Throwable thrown) {
LogRecord record = new LogRecord(level, message);
record.setThrown(thrown);
logger.log(record);
log.put(record.getSequenceNumber(), record);
}
private static Optional<Path> getChildDirectory(Path parent, String name) {
try (Stream<Path> children = Files.list(parent)) {
return children.filter(Files::isDirectory)
.filter(path -> path.endsWith(name))
.findAny();
}
catch (IOException e) {
throw new UncheckedIOException("Failed to list files under " + parent, e);
}
}
} | class VespaCliTestRunner implements TestRunner {
private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName());
private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>();
private final Path artifactsPath;
private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED);
@Inject
public VespaCliTestRunner(VespaCliTestRunnerConfig config) {
this(config.artifactsPath().resolve("artifacts"));
}
VespaCliTestRunner(Path artifactsPath) {
this.artifactsPath = artifactsPath;
}
@Override
public Collection<LogRecord> getLog(long after) {
return log.tailMap(after + 1).values();
}
@Override
public Status getStatus() {
return status.get();
}
@Override
public CompletableFuture<?> test(Suite suite, byte[] config) {
if (status.getAndSet(RUNNING) == RUNNING)
throw new IllegalStateException("Tests already running, not supposed to be started now");
return CompletableFuture.runAsync(() -> runTests(suite, config));
}
@Override
public boolean isSupported() {
return getChildDirectory(artifactsPath, "tests").isPresent();
}
void runTests(Suite suite, byte[] config) {
Process process = null;
try {
TestConfig testConfig = TestConfig.fromJson(config);
process = testRunProcessBuilder(suite, testConfig).start();
BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream()));
in.lines().forEach(line -> {
if (line.length() > 1 << 13)
line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)";
log(Level.INFO, line, null);
});
status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? FAILURE : ERROR);
}
catch (Exception e) {
if (process != null)
process.destroyForcibly();
log(Level.SEVERE, "Failed running tests", e);
status.set(ERROR);
}
}
ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException {
Path suitePath = getChildDirectory(artifactsPath, "tests")
.flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite)))
.orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'"));
ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(),
"--application", config.application().toFullString(),
"--endpoints", toEndpointsConfig(config),
"--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(),
"--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString());
builder.redirectErrorStream(true);
return builder;
}
private static String toSuiteDirectoryName(Suite suite) {
switch (suite) {
case SYSTEM_TEST: return "system-test";
case STAGING_SETUP_TEST: return "staging-setup";
case STAGING_TEST: return "staging-test";
default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'");
}
}
private void log(Level level, String message, Throwable thrown) {
LogRecord record = new LogRecord(level, message);
record.setThrown(thrown);
logger.log(record);
log.put(record.getSequenceNumber(), record);
}
private static Optional<Path> getChildDirectory(Path parent, String name) {
try (Stream<Path> children = Files.list(parent)) {
return children.filter(Files::isDirectory)
.filter(path -> path.endsWith(name))
.findAny();
}
catch (IOException e) {
throw new UncheckedIOException("Failed to list files under " + parent, e);
}
}
} |
Should this be `setMinPendingCount` instead of max? This currently will have the (perhaps surprising) effect that _setting_ `concurrency` actually reduces the potential maximum bucket parallelism. | private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) {
int wantedDocumentCount = Math.min(streamed ? Integer.MAX_VALUE : 1 << 10,
getProperty(request, WANTED_DOCUMENT_COUNT, integerParser)
.orElse(streamed ? Integer.MAX_VALUE : 1));
if (wantedDocumentCount <= 0)
throw new IllegalArgumentException("wantedDocumentCount must be positive");
Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser);
concurrency.ifPresent(value -> {
if (value <= 0)
throw new IllegalArgumentException("concurrency must be positive");
});
Optional<String> cluster = getProperty(request, CLUSTER);
if (cluster.isEmpty() && path.documentType().isEmpty())
throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level");
Optional<Integer> slices = getProperty(request, SLICES, integerParser);
Optional<Integer> sliceId = getProperty(request, SLICE_ID, integerParser);
VisitorParameters parameters = parseCommonParameters(request, path, cluster);
parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME)));
parameters.setMaxTotalHits(wantedDocumentCount);
StaticThrottlePolicy throttlePolicy;
if (streamed) {
throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1);
concurrency.ifPresent(throttlePolicy::setMaxPendingCount);
}
else {
throttlePolicy = new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)));
}
parameters.setThrottlePolicy(throttlePolicy);
parameters.visitInconsistentBuckets(true);
parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()));
if (slices.isPresent() && sliceId.isPresent())
parameters.slice(slices.get(), sliceId.get());
else if (slices.isPresent() != sliceId.isPresent())
throw new IllegalArgumentException("None or both of '" + SLICES + "' and '" + SLICE_ID + "' must be set");
return parameters;
} | concurrency.ifPresent(throttlePolicy::setMaxPendingCount); | private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) {
int wantedDocumentCount = Math.min(streamed ? Integer.MAX_VALUE : 1 << 10,
getProperty(request, WANTED_DOCUMENT_COUNT, integerParser)
.orElse(streamed ? Integer.MAX_VALUE : 1));
if (wantedDocumentCount <= 0)
throw new IllegalArgumentException("wantedDocumentCount must be positive");
Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser);
concurrency.ifPresent(value -> {
if (value <= 0)
throw new IllegalArgumentException("concurrency must be positive");
});
Optional<String> cluster = getProperty(request, CLUSTER);
if (cluster.isEmpty() && path.documentType().isEmpty())
throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level");
Optional<Integer> slices = getProperty(request, SLICES, integerParser);
Optional<Integer> sliceId = getProperty(request, SLICE_ID, integerParser);
VisitorParameters parameters = parseCommonParameters(request, path, cluster);
parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME)));
parameters.setMaxTotalHits(wantedDocumentCount);
StaticThrottlePolicy throttlePolicy;
if (streamed) {
throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1);
concurrency.ifPresent(throttlePolicy::setMaxPendingCount);
}
else {
throttlePolicy = new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)));
}
parameters.setThrottlePolicy(throttlePolicy);
parameters.visitInconsistentBuckets(true);
parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()));
if (slices.isPresent() && sliceId.isPresent())
parameters.slice(slices.get(), sliceId.get());
else if (slices.isPresent() != sliceId.isPresent())
throw new IllegalArgumentException("None or both of '" + SLICES + "' and '" + SLICE_ID + "' must be set");
return parameters;
} | class DocumentOperationParser {
private final DocumentTypeManager manager;
DocumentOperationParser(DocumentmanagerConfig config) {
this.manager = new DocumentTypeManager(config);
}
DocumentPut parsePut(InputStream inputStream, String docId) {
return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT);
}
DocumentUpdate parseUpdate(InputStream inputStream, String docId) {
return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE);
}
private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) {
return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId);
}
} | class DocumentOperationParser {
private final DocumentTypeManager manager;
DocumentOperationParser(DocumentmanagerConfig config) {
this.manager = new DocumentTypeManager(config);
}
DocumentPut parsePut(InputStream inputStream, String docId) {
return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT);
}
DocumentUpdate parseUpdate(InputStream inputStream, String docId) {
return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE);
}
private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) {
return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId);
}
} |
The idea was that it allows a limit on the concurrency, while the dynamic throttling takes care of scaling up, as necessary. Consider not wanting to exert much pressure on the system, while using streamed mode. | private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) {
int wantedDocumentCount = Math.min(streamed ? Integer.MAX_VALUE : 1 << 10,
getProperty(request, WANTED_DOCUMENT_COUNT, integerParser)
.orElse(streamed ? Integer.MAX_VALUE : 1));
if (wantedDocumentCount <= 0)
throw new IllegalArgumentException("wantedDocumentCount must be positive");
Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser);
concurrency.ifPresent(value -> {
if (value <= 0)
throw new IllegalArgumentException("concurrency must be positive");
});
Optional<String> cluster = getProperty(request, CLUSTER);
if (cluster.isEmpty() && path.documentType().isEmpty())
throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level");
Optional<Integer> slices = getProperty(request, SLICES, integerParser);
Optional<Integer> sliceId = getProperty(request, SLICE_ID, integerParser);
VisitorParameters parameters = parseCommonParameters(request, path, cluster);
parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME)));
parameters.setMaxTotalHits(wantedDocumentCount);
StaticThrottlePolicy throttlePolicy;
if (streamed) {
throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1);
concurrency.ifPresent(throttlePolicy::setMaxPendingCount);
}
else {
throttlePolicy = new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)));
}
parameters.setThrottlePolicy(throttlePolicy);
parameters.visitInconsistentBuckets(true);
parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()));
if (slices.isPresent() && sliceId.isPresent())
parameters.slice(slices.get(), sliceId.get());
else if (slices.isPresent() != sliceId.isPresent())
throw new IllegalArgumentException("None or both of '" + SLICES + "' and '" + SLICE_ID + "' must be set");
return parameters;
} | concurrency.ifPresent(throttlePolicy::setMaxPendingCount); | private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) {
int wantedDocumentCount = Math.min(streamed ? Integer.MAX_VALUE : 1 << 10,
getProperty(request, WANTED_DOCUMENT_COUNT, integerParser)
.orElse(streamed ? Integer.MAX_VALUE : 1));
if (wantedDocumentCount <= 0)
throw new IllegalArgumentException("wantedDocumentCount must be positive");
Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser);
concurrency.ifPresent(value -> {
if (value <= 0)
throw new IllegalArgumentException("concurrency must be positive");
});
Optional<String> cluster = getProperty(request, CLUSTER);
if (cluster.isEmpty() && path.documentType().isEmpty())
throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level");
Optional<Integer> slices = getProperty(request, SLICES, integerParser);
Optional<Integer> sliceId = getProperty(request, SLICE_ID, integerParser);
VisitorParameters parameters = parseCommonParameters(request, path, cluster);
parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME)));
parameters.setMaxTotalHits(wantedDocumentCount);
StaticThrottlePolicy throttlePolicy;
if (streamed) {
throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1);
concurrency.ifPresent(throttlePolicy::setMaxPendingCount);
}
else {
throttlePolicy = new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)));
}
parameters.setThrottlePolicy(throttlePolicy);
parameters.visitInconsistentBuckets(true);
parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()));
if (slices.isPresent() && sliceId.isPresent())
parameters.slice(slices.get(), sliceId.get());
else if (slices.isPresent() != sliceId.isPresent())
throw new IllegalArgumentException("None or both of '" + SLICES + "' and '" + SLICE_ID + "' must be set");
return parameters;
} | class DocumentOperationParser {
private final DocumentTypeManager manager;
DocumentOperationParser(DocumentmanagerConfig config) {
this.manager = new DocumentTypeManager(config);
}
DocumentPut parsePut(InputStream inputStream, String docId) {
return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT);
}
DocumentUpdate parseUpdate(InputStream inputStream, String docId) {
return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE);
}
private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) {
return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId);
}
} | class DocumentOperationParser {
private final DocumentTypeManager manager;
DocumentOperationParser(DocumentmanagerConfig config) {
this.manager = new DocumentTypeManager(config);
}
DocumentPut parsePut(InputStream inputStream, String docId) {
return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT);
}
DocumentUpdate parseUpdate(InputStream inputStream, String docId) {
return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE);
}
private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) {
return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId);
}
} |
Understood. Technically it's then more of a `max-concurrency` in the streaming case, whereas it immediately fires straight from the hip in regular buffered mode. | private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) {
int wantedDocumentCount = Math.min(streamed ? Integer.MAX_VALUE : 1 << 10,
getProperty(request, WANTED_DOCUMENT_COUNT, integerParser)
.orElse(streamed ? Integer.MAX_VALUE : 1));
if (wantedDocumentCount <= 0)
throw new IllegalArgumentException("wantedDocumentCount must be positive");
Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser);
concurrency.ifPresent(value -> {
if (value <= 0)
throw new IllegalArgumentException("concurrency must be positive");
});
Optional<String> cluster = getProperty(request, CLUSTER);
if (cluster.isEmpty() && path.documentType().isEmpty())
throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level");
Optional<Integer> slices = getProperty(request, SLICES, integerParser);
Optional<Integer> sliceId = getProperty(request, SLICE_ID, integerParser);
VisitorParameters parameters = parseCommonParameters(request, path, cluster);
parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME)));
parameters.setMaxTotalHits(wantedDocumentCount);
StaticThrottlePolicy throttlePolicy;
if (streamed) {
throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1);
concurrency.ifPresent(throttlePolicy::setMaxPendingCount);
}
else {
throttlePolicy = new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)));
}
parameters.setThrottlePolicy(throttlePolicy);
parameters.visitInconsistentBuckets(true);
parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()));
if (slices.isPresent() && sliceId.isPresent())
parameters.slice(slices.get(), sliceId.get());
else if (slices.isPresent() != sliceId.isPresent())
throw new IllegalArgumentException("None or both of '" + SLICES + "' and '" + SLICE_ID + "' must be set");
return parameters;
} | concurrency.ifPresent(throttlePolicy::setMaxPendingCount); | private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) {
int wantedDocumentCount = Math.min(streamed ? Integer.MAX_VALUE : 1 << 10,
getProperty(request, WANTED_DOCUMENT_COUNT, integerParser)
.orElse(streamed ? Integer.MAX_VALUE : 1));
if (wantedDocumentCount <= 0)
throw new IllegalArgumentException("wantedDocumentCount must be positive");
Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser);
concurrency.ifPresent(value -> {
if (value <= 0)
throw new IllegalArgumentException("concurrency must be positive");
});
Optional<String> cluster = getProperty(request, CLUSTER);
if (cluster.isEmpty() && path.documentType().isEmpty())
throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level");
Optional<Integer> slices = getProperty(request, SLICES, integerParser);
Optional<Integer> sliceId = getProperty(request, SLICE_ID, integerParser);
VisitorParameters parameters = parseCommonParameters(request, path, cluster);
parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME)));
parameters.setMaxTotalHits(wantedDocumentCount);
StaticThrottlePolicy throttlePolicy;
if (streamed) {
throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1);
concurrency.ifPresent(throttlePolicy::setMaxPendingCount);
}
else {
throttlePolicy = new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)));
}
parameters.setThrottlePolicy(throttlePolicy);
parameters.visitInconsistentBuckets(true);
parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()));
if (slices.isPresent() && sliceId.isPresent())
parameters.slice(slices.get(), sliceId.get());
else if (slices.isPresent() != sliceId.isPresent())
throw new IllegalArgumentException("None or both of '" + SLICES + "' and '" + SLICE_ID + "' must be set");
return parameters;
} | class DocumentOperationParser {
private final DocumentTypeManager manager;
DocumentOperationParser(DocumentmanagerConfig config) {
this.manager = new DocumentTypeManager(config);
}
DocumentPut parsePut(InputStream inputStream, String docId) {
return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT);
}
DocumentUpdate parseUpdate(InputStream inputStream, String docId) {
return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE);
}
private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) {
return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId);
}
} | class DocumentOperationParser {
private final DocumentTypeManager manager;
DocumentOperationParser(DocumentmanagerConfig config) {
this.manager = new DocumentTypeManager(config);
}
DocumentPut parsePut(InputStream inputStream, String docId) {
return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT);
}
DocumentUpdate parseUpdate(InputStream inputStream, String docId) {
return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE);
}
private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) {
return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId);
}
} |
Nice one 👍 | private void writeBufferToOutputStream(ResponseContentPart contentPart) throws Throwable {
callCompletionHandlerWhenDone(contentPart.handler, () -> {
ByteBuffer buffer = contentPart.buf;
final int bytesToSend = buffer.remaining();
try {
if (buffer.hasArray()) {
outputStream.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
} else {
final byte[] array = new byte[buffer.remaining()];
buffer.get(array);
outputStream.write(array);
}
metricReporter.successfulWrite(bytesToSend);
} catch (Throwable throwable) {
metricReporter.failedWrite();
throw throwable;
}
});
} | outputStream.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining()); | private void writeBufferToOutputStream(ResponseContentPart contentPart) throws Throwable {
callCompletionHandlerWhenDone(contentPart.handler, () -> {
ByteBuffer buffer = contentPart.buf;
final int bytesToSend = buffer.remaining();
try {
if (buffer.hasArray()) {
outputStream.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
} else {
final byte[] array = new byte[buffer.remaining()];
buffer.get(array);
outputStream.write(array);
}
metricReporter.successfulWrite(bytesToSend);
} catch (Throwable throwable) {
metricReporter.failedWrite();
throw throwable;
}
});
} | class ServletOutputStreamWriter {
/** Rules:
* 1) Don't modify the output stream without isReady returning true (write/flush/close).
* Multiple modification calls without interleaving isReady calls are not allowed.
* 2) If isReady returned false, no other calls should be made until the write listener is invoked.
* 3) If the write listener sees isReady == false, it must not do any modifications before its next invocation.
*/
private enum State {
NOT_STARTED,
WAITING_FOR_WRITE_POSSIBLE_CALLBACK,
WAITING_FOR_BUFFER,
WRITING_BUFFERS,
FINISHED_OR_ERROR
}
private static final Logger log = Logger.getLogger(ServletOutputStreamWriter.class.getName());
private static final ByteBuffer CLOSE_STREAM_BUFFER = ByteBuffer.allocate(0);
private final Object monitor = new Object();
private State state = State.NOT_STARTED;
private final ServletOutputStream outputStream;
private final Janitor janitor;
private final Deque<ResponseContentPart> responseContentQueue = new ArrayDeque<>();
private final RequestMetricReporter metricReporter;
/**
* When this future completes there will be no more calls against the servlet output stream or servlet response.
* The framework is still allowed to invoke us though.
*
* The future might complete in the servlet framework thread, user thread or executor thread.
*/
private final CompletableFuture<Void> finishedFuture = new CompletableFuture<>();
ServletOutputStreamWriter(ServletOutputStream outputStream, Janitor janitor, RequestMetricReporter metricReporter) {
this.outputStream = outputStream;
this.janitor = janitor;
this.metricReporter = metricReporter;
}
void writeBuffer(ByteBuffer buf, CompletionHandler handler) {
boolean thisThreadShouldWrite = false;
synchronized (monitor) {
if (state == State.FINISHED_OR_ERROR) {
janitor.scheduleTask(() -> handler.failed(new IllegalStateException("ContentChannel already closed.")));
return;
}
responseContentQueue.addLast(new ResponseContentPart(buf, handler));
switch (state) {
case NOT_STARTED:
state = State.WAITING_FOR_WRITE_POSSIBLE_CALLBACK;
outputStream.setWriteListener(writeListener);
break;
case WAITING_FOR_WRITE_POSSIBLE_CALLBACK:
case WRITING_BUFFERS:
break;
case WAITING_FOR_BUFFER:
thisThreadShouldWrite = true;
state = State.WRITING_BUFFERS;
break;
default:
throw new IllegalStateException("Invalid state " + state);
}
}
if (thisThreadShouldWrite) {
writeBuffersInQueueToOutputStream();
}
}
void fail(Throwable t) { setFinished(t); }
void close(CompletionHandler handler) { writeBuffer(CLOSE_STREAM_BUFFER, handler); }
void close() { close(NOOP_COMPLETION_HANDLER); }
CompletableFuture<Void> finishedFuture() { return finishedFuture; }
private void writeBuffersInQueueToOutputStream() {
boolean lastOperationWasFlush = false;
while (true) {
ResponseContentPart contentPart;
synchronized (monitor) {
if (state == State.FINISHED_OR_ERROR) {
return;
}
assertStateIs(state, State.WRITING_BUFFERS);
if (!outputStream.isReady()) {
state = State.WAITING_FOR_WRITE_POSSIBLE_CALLBACK;
return;
}
contentPart = responseContentQueue.pollFirst();
if (contentPart == null && lastOperationWasFlush) {
state = State.WAITING_FOR_BUFFER;
return;
}
}
try {
boolean isFlush = contentPart == null;
if (isFlush) {
outputStream.flush();
lastOperationWasFlush = true;
continue;
}
lastOperationWasFlush = false;
if (contentPart.buf == CLOSE_STREAM_BUFFER) {
callCompletionHandlerWhenDone(contentPart.handler, outputStream::close);
setFinished(null);
return;
} else {
writeBufferToOutputStream(contentPart);
}
} catch (Throwable t) {
setFinished(t);
return;
}
}
}
private void setFinished(Throwable t) {
synchronized (monitor) {
state = State.FINISHED_OR_ERROR;
if (!responseContentQueue.isEmpty()) {
failAllParts_holdingLock(t != null ? t : new IllegalStateException("ContentChannel closed."));
}
}
assert !Thread.holdsLock(monitor);
if (t != null) {
finishedFuture.completeExceptionally(t);
} else {
finishedFuture.complete(null);
}
}
private void failAllParts_holdingLock(Throwable e) {
assert Thread.holdsLock(monitor);
ArrayList<ResponseContentPart> failedParts = new ArrayList<>(responseContentQueue);
responseContentQueue.clear();
@SuppressWarnings("ThrowableInstanceNeverThrown")
RuntimeException failReason = new RuntimeException("Failing due to earlier ServletOutputStream write failure", e);
Consumer<ResponseContentPart> failCompletionHandler = responseContentPart ->
runCompletionHandler_logOnExceptions(
() -> responseContentPart.handler.failed(failReason));
janitor.scheduleTask(() -> failedParts.forEach(failCompletionHandler));
}
private static void callCompletionHandlerWhenDone(CompletionHandler handler, IORunnable runnable) throws Exception {
try {
runnable.run();
} catch (Throwable e) {
runCompletionHandler_logOnExceptions(() -> handler.failed(e));
throw e;
}
handler.completed();
}
private static void runCompletionHandler_logOnExceptions(Runnable runnable) {
try {
runnable.run();
} catch (Throwable e) {
log.log(Level.WARNING, "Unexpected exception from CompletionHandler.", e);
}
}
private static void assertStateIs(State currentState, State expectedState) {
if (currentState != expectedState) {
AssertionError error = new AssertionError("Expected state " + expectedState + ", got state " + currentState);
log.log(Level.WARNING, "Assertion failed.", error);
throw error;
}
}
private final WriteListener writeListener = new WriteListener() {
@Override
public void onWritePossible() {
synchronized (monitor) {
if (state == State.FINISHED_OR_ERROR) {
return;
}
assertStateIs(state, State.WAITING_FOR_WRITE_POSSIBLE_CALLBACK);
state = State.WRITING_BUFFERS;
}
writeBuffersInQueueToOutputStream();
}
@Override public void onError(Throwable t) { setFinished(t); }
};
private static class ResponseContentPart {
public final ByteBuffer buf;
public final CompletionHandler handler;
public ResponseContentPart(ByteBuffer buf, CompletionHandler handler) {
this.buf = buf;
this.handler = handler;
}
}
@FunctionalInterface
private interface IORunnable {
void run() throws IOException;
}
} | class ServletOutputStreamWriter {
/** Rules:
* 1) Don't modify the output stream without isReady returning true (write/flush/close).
* Multiple modification calls without interleaving isReady calls are not allowed.
* 2) If isReady returned false, no other calls should be made until the write listener is invoked.
* 3) If the write listener sees isReady == false, it must not do any modifications before its next invocation.
*/
private enum State {
NOT_STARTED,
WAITING_FOR_WRITE_POSSIBLE_CALLBACK,
WAITING_FOR_BUFFER,
WRITING_BUFFERS,
FINISHED_OR_ERROR
}
private static final Logger log = Logger.getLogger(ServletOutputStreamWriter.class.getName());
private static final ByteBuffer CLOSE_STREAM_BUFFER = ByteBuffer.allocate(0);
private final Object monitor = new Object();
private State state = State.NOT_STARTED;
private final ServletOutputStream outputStream;
private final Janitor janitor;
private final Deque<ResponseContentPart> responseContentQueue = new ArrayDeque<>();
private final RequestMetricReporter metricReporter;
/**
* When this future completes there will be no more calls against the servlet output stream or servlet response.
* The framework is still allowed to invoke us though.
*
* The future might complete in the servlet framework thread, user thread or executor thread.
*/
private final CompletableFuture<Void> finishedFuture = new CompletableFuture<>();
ServletOutputStreamWriter(ServletOutputStream outputStream, Janitor janitor, RequestMetricReporter metricReporter) {
this.outputStream = outputStream;
this.janitor = janitor;
this.metricReporter = metricReporter;
}
void writeBuffer(ByteBuffer buf, CompletionHandler handler) {
boolean thisThreadShouldWrite = false;
synchronized (monitor) {
if (state == State.FINISHED_OR_ERROR) {
janitor.scheduleTask(() -> handler.failed(new IllegalStateException("ContentChannel already closed.")));
return;
}
responseContentQueue.addLast(new ResponseContentPart(buf, handler));
switch (state) {
case NOT_STARTED:
state = State.WAITING_FOR_WRITE_POSSIBLE_CALLBACK;
outputStream.setWriteListener(writeListener);
break;
case WAITING_FOR_WRITE_POSSIBLE_CALLBACK:
case WRITING_BUFFERS:
break;
case WAITING_FOR_BUFFER:
thisThreadShouldWrite = true;
state = State.WRITING_BUFFERS;
break;
default:
throw new IllegalStateException("Invalid state " + state);
}
}
if (thisThreadShouldWrite) {
writeBuffersInQueueToOutputStream();
}
}
void fail(Throwable t) { setFinished(t); }
void close(CompletionHandler handler) { writeBuffer(CLOSE_STREAM_BUFFER, handler); }
void close() { close(NOOP_COMPLETION_HANDLER); }
CompletableFuture<Void> finishedFuture() { return finishedFuture; }
private void writeBuffersInQueueToOutputStream() {
boolean lastOperationWasFlush = false;
while (true) {
ResponseContentPart contentPart;
synchronized (monitor) {
if (state == State.FINISHED_OR_ERROR) {
return;
}
assertStateIs(state, State.WRITING_BUFFERS);
if (!outputStream.isReady()) {
state = State.WAITING_FOR_WRITE_POSSIBLE_CALLBACK;
return;
}
contentPart = responseContentQueue.pollFirst();
if (contentPart == null && lastOperationWasFlush) {
state = State.WAITING_FOR_BUFFER;
return;
}
}
try {
boolean isFlush = contentPart == null;
if (isFlush) {
outputStream.flush();
lastOperationWasFlush = true;
continue;
}
lastOperationWasFlush = false;
if (contentPart.buf == CLOSE_STREAM_BUFFER) {
callCompletionHandlerWhenDone(contentPart.handler, outputStream::close);
setFinished(null);
return;
} else {
writeBufferToOutputStream(contentPart);
}
} catch (Throwable t) {
setFinished(t);
return;
}
}
}
private void setFinished(Throwable t) {
synchronized (monitor) {
state = State.FINISHED_OR_ERROR;
if (!responseContentQueue.isEmpty()) {
failAllParts_holdingLock(t != null ? t : new IllegalStateException("ContentChannel closed."));
}
}
assert !Thread.holdsLock(monitor);
if (t != null) {
finishedFuture.completeExceptionally(t);
} else {
finishedFuture.complete(null);
}
}
private void failAllParts_holdingLock(Throwable e) {
assert Thread.holdsLock(monitor);
ArrayList<ResponseContentPart> failedParts = new ArrayList<>(responseContentQueue);
responseContentQueue.clear();
@SuppressWarnings("ThrowableInstanceNeverThrown")
RuntimeException failReason = new RuntimeException("Failing due to earlier ServletOutputStream write failure", e);
Consumer<ResponseContentPart> failCompletionHandler = responseContentPart ->
runCompletionHandler_logOnExceptions(
() -> responseContentPart.handler.failed(failReason));
janitor.scheduleTask(() -> failedParts.forEach(failCompletionHandler));
}
private static void callCompletionHandlerWhenDone(CompletionHandler handler, IORunnable runnable) throws Exception {
try {
runnable.run();
} catch (Throwable e) {
runCompletionHandler_logOnExceptions(() -> handler.failed(e));
throw e;
}
handler.completed();
}
private static void runCompletionHandler_logOnExceptions(Runnable runnable) {
try {
runnable.run();
} catch (Throwable e) {
log.log(Level.WARNING, "Unexpected exception from CompletionHandler.", e);
}
}
private static void assertStateIs(State currentState, State expectedState) {
if (currentState != expectedState) {
AssertionError error = new AssertionError("Expected state " + expectedState + ", got state " + currentState);
log.log(Level.WARNING, "Assertion failed.", error);
throw error;
}
}
private final WriteListener writeListener = new WriteListener() {
@Override
public void onWritePossible() {
synchronized (monitor) {
if (state == State.FINISHED_OR_ERROR) {
return;
}
assertStateIs(state, State.WAITING_FOR_WRITE_POSSIBLE_CALLBACK);
state = State.WRITING_BUFFERS;
}
writeBuffersInQueueToOutputStream();
}
@Override public void onError(Throwable t) { setFinished(t); }
};
private static class ResponseContentPart {
public final ByteBuffer buf;
public final CompletionHandler handler;
public ResponseContentPart(ByteBuffer buf, CompletionHandler handler) {
this.buf = buf;
this.handler = handler;
}
}
@FunctionalInterface
private interface IORunnable {
void run() throws IOException;
}
} |
The error message should be something about using ckms instead for non-public? | private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
if ( ! deployState.isHosted()) return;
if ( ! cluster.getZone().system().isPublic())
throw new RuntimeException("cloud secret store is not available in this zone");
CloudSecretStore cloudSecretStore = new CloudSecretStore();
Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
.stream()
.collect(Collectors.toMap(
TenantSecretStore::getName,
store -> store
));
Element store = XML.getChild(secretStoreElement, "store");
for (Element group : XML.getChildren(store, "aws-parameter-store")) {
String account = group.getAttribute("account");
String region = group.getAttribute("aws-region");
TenantSecretStore secretStore = secretStoresByName.get(account);
if (secretStore == null)
throw new RuntimeException("No configured secret store named " + account);
if (secretStore.getExternalId().isEmpty())
throw new RuntimeException("No external ID has been set");
cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
}
cluster.addComponent(cloudSecretStore);
} | throw new RuntimeException("cloud secret store is not available in this zone"); | private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
if ( ! deployState.isHosted()) return;
if ( ! cluster.getZone().system().isPublic())
throw new RuntimeException("cloud secret store is not supported in non-public system, please see documentation");
CloudSecretStore cloudSecretStore = new CloudSecretStore();
Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
.stream()
.collect(Collectors.toMap(
TenantSecretStore::getName,
store -> store
));
Element store = XML.getChild(secretStoreElement, "store");
for (Element group : XML.getChildren(store, "aws-parameter-store")) {
String account = group.getAttribute("account");
String region = group.getAttribute("aws-region");
TenantSecretStore secretStore = secretStoresByName.get(account);
if (secretStore == null)
throw new RuntimeException("No configured secret store named " + account);
if (secretStore.getExternalId().isEmpty())
throw new RuntimeException("No external ID has been set");
cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
}
cluster.addComponent(cloudSecretStore);
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String DEPRECATED_CONTAINER_TAG = "jdisc";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
checkTagName(spec, log);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec, deployState);
addServlets(deployState, spec, cluster);
addModelEvaluation(spec, cluster, context);
addModelEvaluationBundles(cluster);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addRoutingAliases(cluster, spec, deployState.zone().environment());
addNodes(cluster, spec, context);
addClientProviders(deployState, spec, cluster);
addServerProviders(deployState, spec, cluster);
addAthensCopperArgos(cluster, context);
addZooKeeper(cluster, spec);
addParameterStoreValidationHandler(cluster, deployState);
}
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
if(deployState.isHosted()) {
cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
}
if (deployState.zone().system().isPublic()) {
BindingPattern bindingPattern = SystemBindingPattern.fromHttpPath("/validate-secret-store");
Handler<AbstractConfigProducer<?>> handler = new Handler<>(
new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
handler.addServerBindings(bindingPattern);
cluster.addComponent(handler);
}
}
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
if ( ! hasZooKeeper(spec)) return;
Element nodesElement = XML.getChild(spec, "nodes");
boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
if (isCombined) {
throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
}
long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
", have " + nonRetiredNodes + " non-retired");
}
cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
public static void addReconfigurableZooKeeperServerComponents(Container container) {
container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", container));
container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.Reconfigurer", container));
container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl", container));
}
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
String configId = container.getConfigId();
return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", configId));
}
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
Element secretStoreElement = XML.getChild(spec, "secret-store");
if (secretStoreElement != null) {
String type = secretStoreElement.getAttribute("type");
if ("cloud".equals(type)) {
addCloudSecretStore(cluster, secretStoreElement, deployState);
} else {
SecretStore secretStore = new SecretStore();
for (Element group : XML.getChildren(secretStoreElement, "group")) {
secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
}
cluster.setSecretStore(secretStore);
}
}
}
private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) {
if ( ! context.getDeployState().isHosted()) return;
app.getDeployment().map(DeploymentSpec::fromXml)
.ifPresent(deploymentSpec -> {
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec);
});
}
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
setRotations(container, endpoints, cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance());
if (instance.isEmpty()) return false;
return instance.get().zones().stream()
.anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
declaredZone.active());
}
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
var rotationsProperty = endpoints.stream()
.filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
.flatMap(endpoint -> endpoint.names().stream())
.collect(Collectors.toUnmodifiableSet());
container.setProp("rotations", String.join(",", rotationsProperty));
}
private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
if (environment != Environment.prod) return;
Element aliases = XML.getChild(spec, "aliases");
for (Element alias : XML.getChildren(aliases, "service-alias")) {
cluster.serviceAliases().add(XML.getValue(alias));
}
for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
cluster.endpointAliases().add(XML.getValue(alias));
}
}
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element components : XML.getChildren(spec, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, spec, "component");
}
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(
name + "-status-handler",
statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
SystemBindingPattern.fromHttpPath("/" + name)));
} else {
cluster.addVipHandler();
}
}
private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
for (Element clientSpec: XML.getChildren(spec, "client")) {
cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientSpec));
}
}
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
cluster.addDefaultSearchAccessLog();
if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent)) {
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "qrs"));
}
}
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
if (isHostedTenantApplication(context)) {
addHostedImplicitHttpIfNotPresent(cluster);
addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
addDefaultConnectorHostedFilterBinding(cluster);
addAdditionalHostedConnector(deployState, cluster, context);
}
}
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
cluster.getHttp().getAccessControl()
.ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp())); ;
}
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) {
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
HostedSslConnectorFactory connectorFactory;
Collection<String> tlsCiphersOverride = deployState.getProperties().tlsCiphersOverride();
Duration maxConnectionLife = Duration.ofSeconds(deployState.featureFlags().maxConnectionLifeInHosted());
if (deployState.endpointCertificateSecrets().isPresent()) {
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https:
}
EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
boolean enforceHandshakeClientAuth = cluster.getHttp().getAccessControl()
.map(accessControl -> accessControl.clientAuthentication)
.map(clientAuth -> clientAuth.equals(AccessControl.ClientAuthentication.need))
.orElse(false);
connectorFactory = authorizeClient
? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(
serverName, endpointCertificateSecrets, getTlsClientAuthorities(deployState), tlsCiphersOverride, maxConnectionLife)
: HostedSslConnectorFactory.withProvidedCertificate(
serverName, endpointCertificateSecrets, enforceHandshakeClientAuth, tlsCiphersOverride, maxConnectionLife);
} else {
connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName, tlsCiphersOverride, maxConnectionLife);
}
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
}
/*
Return trusted certificates as a PEM encoded string containing the concatenation of
trusted certs from the application package and all operator certificates.
*/
String getTlsClientAuthorities(DeployState deployState) {
List<X509Certificate> trustedCertificates = deployState.tlsClientAuthority()
.map(X509CertificateUtils::certificateListFromPem)
.orElse(Collections.emptyList());
ArrayList<X509Certificate> x509Certificates = new ArrayList<>(trustedCertificates);
x509Certificates.addAll(deployState.getProperties().operatorCertificates());
return X509CertificateUtils.toPem(x509Certificates);
}
private static boolean isHostedTenantApplication(ConfigModelContext context) {
var deployState = context.getDeployState();
boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester();
return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication;
}
/** Ensures the cluster has an http setup with a server listening on the default Vespa web service port. */
private static void addHostedImplicitHttpIfNotPresent(ApplicationContainerCluster cluster) {
    if (cluster.getHttp() == null)
        cluster.setHttp(new Http(new FilterChains(cluster)));

    JettyHttpServer server = cluster.getHttp().getHttpServer().orElse(null);
    if (server == null) {
        server = new JettyHttpServer("DefaultHttpServer", cluster, cluster.isHostedVespa());
        cluster.getHttp().setHttpServer(server);
    }

    int defaultPort = Defaults.getDefaults().vespaWebServicePort();
    boolean hasDefaultConnector = server.getConnectorFactories().stream()
                                        .anyMatch(connector -> connector.getListenPort() == defaultPort);
    if ( ! hasDefaultConnector)
        server.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
/** Adds the implicit access control filter chain for hosted Athenz applications that have not configured one. */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    Http http = cluster.getHttp();
    if (http.getAccessControl().isPresent()) return; // explicitly configured by the application
    Optional<AthenzDomain> tenantDomain = deployState.getProperties().athenzDomain();
    if (tenantDomain.isEmpty()) return;              // no Athenz domain: nothing to protect with
    new AccessControl.Builder(tenantDomain.get().value())
            .setHandlers(cluster)
            .readEnabled(false)
            .writeEnabled(false)
            .clientAuthentication(AccessControl.ClientAuthentication.need)
            .build()
            .configureHttpFilterChains(http);
}
/** Builds the http configuration from the given element, dropping all servers when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
    Http http = new HttpBuilder().build(deployState, cluster, httpElement);
    if (networking == Networking.disable) http.removeAllServers();
    return http;
}
/** Builds and registers each top-level &lt;servlet&gt; element of the cluster spec. */
private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    XML.getChildren(spec, "servlet")
       .forEach(servletElement -> cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElement)));
}
/** Configures the document API on the cluster if a &lt;document-api&gt; element is present. */
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
    ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/** Configures document processing on the cluster if a &lt;document-processing&gt; element is present. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    ContainerDocproc docproc = buildDocproc(deployState, cluster, spec);
    if (docproc == null) return;
    cluster.setDocproc(docproc);
    ContainerDocproc.Options options = docproc.options;
    cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
            options.maxConcurrentFactor, options.documentExpansionFactor, options.containerCoreMemory));
}
/** Configures search (chains, handlers, renderers) on the cluster if a &lt;search&gt; element is present. */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    Element searchElement = XML.getChild(spec, "search");
    if (searchElement == null) return;
    addIncludes(searchElement); // expand <include dir="..."> before building chains
    cluster.setSearch(buildSearch(deployState, cluster, searchElement));
    addSearchHandler(cluster, searchElement);
    addGUIHandler(cluster);
    validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer",
                                       ContainerModelBuilder::validateRendererElement);
}
// Configures stateless model evaluation if a <model-evaluation> element is present.
// Per-model ONNX overrides (execution mode, inter/intra-op thread counts) are copied onto
// the matching models from the rank profile list; unknown model names are silently skipped.
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
// NOTE(review): onnxElement may be null here — assumes XML.getChild tolerates a null parent; confirm.
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = profiles.getOnnxModels().get(modelElement.getAttribute("name"));
if (onnxModel == null)
continue; // model not known to the rank profiles; nothing to override
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
/** Returns the text content of the named child element, or the given default if no such child exists. */
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}
/** Returns the text content of the named child element parsed as an int, or the given default if no such child exists. */
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
/**
 * Adds the model-evaluation platform bundles to the cluster. These are added to all application
 * container clusters, even those without 'model-evaluation' in services.xml, because the
 * model-evaluation bundle exposes many public API packages that customer code may use.
 */
protected void addModelEvaluationBundles(ApplicationContainerCluster cluster) {
    cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
    cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
}
// Configures processing chains and renderers from the <processing> element, if present.
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
// Expand <include dir="..."> before building chains from the element
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the search configuration (chains, query profiles, semantic rules, page templates) for the cluster. */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
    SearchChains chains = new DomSearchChainsBuilder(null, false).build(deployState, containerCluster, producerSpec);
    ContainerSearch search = new ContainerSearch(containerCluster, chains, new ContainerSearch.Options());
    applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), search);
    search.setQueryProfiles(deployState.getQueryProfiles());
    search.setSemanticRules(deployState.getSemanticRules());
    return search;
}
/** Validates and applies page templates found in the application package. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage, ContainerSearch containerSearch) {
    PageTemplates.validate(applicationPackage);
    containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Builds and registers each user-declared <handler> element. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element handlerElement : XML.getChildren(spec, "handler"))
        cluster.addComponent(new DomHandlerBuilder(cluster).build(deployState, cluster, handlerElement));
}
/** Throws unless the 'version' attribute of the cluster element is 1.0. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if (Version.fromString(version).equals(new Version(1))) return;
    throw new RuntimeException("Expected container version to be 1.0, but got " + version);
}
/** Warns if the deprecated 'jdisc' tag name is used instead of 'container'. */
private void checkTagName(Element spec, DeployLogger logger) {
    if ( ! spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) return;
    logger.logApplicationPackage(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead.");
}
/** Adds nodes to the cluster: a single implicit node when building standalone, otherwise from the XML spec. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster, context.getDeployState());
    } else {
        addNodesFromXml(cluster, spec, context);
    }
}
/** Adds a single implicit "standalone" container node to the cluster. */
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
    int index = cluster.getContainers().size();
    ApplicationContainer node = new ApplicationContainer(cluster, "standalone", index, deployState);
    cluster.addContainers(Collections.singleton(node));
}
/** Returns whether the given jvmargs string contains GC-related flags (a GC algorithm selector or any CMS flag). */
static boolean incompatibleGCOptions(String jvmargs) {
    Pattern gcAlgorithmPattern = Pattern.compile("-XX:[-+]Use.+GC");
    Pattern cmsPattern = Pattern.compile("-XX:[-+]*CMS");
    if (gcAlgorithmPattern.matcher(jvmargs).find()) return true;
    return cmsPattern.matcher(jvmargs).find();
}
/**
 * Returns the effective JVM GC options: the explicitly given value if non-null, otherwise the
 * deploy-property value; when neither is set, CMS on hosted and G1GC elsewhere.
 */
private static String buildJvmGCOptions(DeployState deployState, String jvmGcOptions) {
    String effective = (jvmGcOptions != null) ? jvmGcOptions : deployState.getProperties().jvmGCOptions();
    if (effective != null && ! effective.isEmpty()) return effective;
    return deployState.isHosted() ? ContainerCluster.CMS : ContainerCluster.G1GC;
}
/**
 * Returns the JVM options given as attributes on the legacy nodes tag. 'jvm-options' and the
 * deprecated 'jvmargs' are mutually exclusive; specifying both throws. When 'jvmargs' contains
 * GC-related flags, the user is warned and the cluster's GC options are reset to G1GC.
 */
private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
String jvmOptions;
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
" and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
}
} else {
// Fall back to the deprecated attribute (empty string when absent)
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
if (incompatibleGCOptions(jvmOptions)) {
deployLogger.logApplicationPackage(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
cluster.setJvmGCOptions(ContainerCluster.G1GC);
}
}
return jvmOptions;
}
/** Returns the value of the named attribute, or null when the attribute is absent. */
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
/**
 * Applies JVM settings given as attributes directly on the legacy nodes tag
 * (jvm-options/jvmargs, jvm-gc-options, allocated-memory).
 */
void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
// getJvmOptions may already have set GC options (when jvmargs contained GC flags); don't overwrite
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions));
}
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
}
/** Applies JVM settings from the dedicated &lt;jvm&gt; element (options, allocated-memory, gc-options). */
void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                   Element jvmElement, ConfigModelContext context) {
    applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS));
    applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    String gcOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
    cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), gcOptions));
}
/**
 * Add nodes to cluster according to the given containerElement.
 *
 * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
 * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
 * simultaneously for all active config models.
 */
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
// No <nodes> element: fall back to implicit allocation
cluster.addContainers(allocateWithoutNodesTag(cluster, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
// JVM settings come either from a child <jvm> element or from legacy attributes on <nodes>
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) {
extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
} else {
extractJvmTag(nodes, cluster, jvmElement, context);
}
applyRoutingAliasProperties(nodes, cluster);
applyDefaultPreload(nodes, nodesElement);
String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
if (!environmentVars.isEmpty()) {
cluster.setEnvironmentVars(environmentVars);
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
/**
 * Returns the child elements of the environment-variables element rendered as "NAME=value "
 * pairs (note the trailing space after each pair), or the empty string when the element is null.
 */
private static String getEnvironmentVariables(Element environmentVariables) {
    if (environmentVariables == null) return "";
    StringBuilder rendered = new StringBuilder();
    for (Element variable : XML.getChildren(environmentVariables))
        rendered.append(variable.getNodeName()).append('=').append(variable.getTextContent()).append(' ');
    return rendered.toString();
}
/** Creates the cluster's nodes from whichever allocation style the &lt;nodes&gt; element uses. */
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("type"))   // dedicated node type
        return createNodesFromNodeType(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("of"))     // reference to a content cluster's nodes
        return createNodesFromContentServiceReference(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("count"))  // explicit node count
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
/**
 * Propagates the cluster's service and endpoint aliases to each container as comma-separated
 * service properties ("servicealiases" / "endpointaliases"). No-op for empty alias sets.
 */
private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) {
    if ( ! cluster.serviceAliases().isEmpty()) {
        // Join once, outside the per-container loop
        String serviceAliases = String.join(",", cluster.serviceAliases());
        result.forEach(container -> container.setProp("servicealiases", serviceAliases));
    }
    if ( ! cluster.endpointAliases().isEmpty()) {
        String endpointAliases = String.join(",", cluster.endpointAliases());
        result.forEach(container -> container.setProp("endpointaliases", endpointAliases));
    }
}
/**
 * Parses a memory percentage string of the form "NN%" and applies it to the cluster.
 * Blank/null input is ignored; malformed input throws IllegalArgumentException.
 */
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    // Single message used for both failure modes (missing '%' and non-integer value)
    String errorMessage = "The memory percentage given for nodes in " + cluster +
                          " must be an integer percentage ending by the '%' sign";
    memoryPercentage = memoryPercentage.trim();
    if ( ! memoryPercentage.endsWith("%"))
        throw new IllegalArgumentException(errorMessage);
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
    }
    catch (NumberFormatException e) {
        // Preserve the parse failure as the cause instead of dropping it
        throw new IllegalArgumentException(errorMessage, e);
    }
}
/** Allocate a container cluster without a nodes tag */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.hostSystem();
if (deployState.isHosted()) {
// Hosted: request a default allocation — 2 nodes in production, 1 elsewhere, unspecified resources
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepository(deployState.getWantedDockerImageRepo())
.build();
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
// NOTE(review): the two boolean args to Capacity.from are presumably (required, canFail) — confirm
Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()),
false,
!deployState.getProperties().isBootstrap());
var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log);
return createNodesFromHosts(log, hosts, cluster, context.getDeployState());
}
else {
// Self-hosted: a single container on the single-node host
return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
}
}
/** Creates a one-node cluster ("container.0") on the given host. */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
    ApplicationContainer container = new ApplicationContainer(cluster, "container.0", 0, context.getDeployState());
    container.setHostResource(host);
    container.initService(context.getDeployLogger());
    return List.of(container);
}
// Provisions nodes via the node repo according to the nodes specification (count/resources),
// requesting an extra cluster controller ZooKeeper dependency when the cluster runs ZooKeeper.
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log,
hasZooKeeper(containerElement));
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster, context.getDeployState());
}
// Allocates all hosts of the given node type (from the 'type' attribute) to this cluster.
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
.dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
.build();
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster, context.getDeployState());
}
// Places this container cluster on the nodes of the content cluster referenced by the 'of' attribute
// (a "combined" cluster). Records the referenced cluster id on this cluster.
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodeSpecification;
try {
nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
} catch (IllegalArgumentException e) {
// Re-throw with the cluster in the message, preserving the original cause
throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
}
String referenceId = nodesElement.getAttribute("of");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodeSpecification,
referenceId,
cluster.getRoot().hostSystem(),
context.getDeployLogger());
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster, context.getDeployState());
}
/** Creates one initialized container node ("container.&lt;index&gt;") per allocated host. */
private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger,
                                                        Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> nodes = new ArrayList<>(hosts.size());
    hosts.forEach((host, membership) -> {
        ApplicationContainer node = new ApplicationContainer(cluster, "container." + membership.index(),
                                                             membership.retired(), membership.index(), deployState);
        node.setHostResource(host);
        node.initService(deployLogger);
        nodes.add(node);
    });
    return nodes;
}
/** Creates one container node per explicit &lt;node&gt; child, indexed in document order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    List<ApplicationContainer> nodes = new ArrayList<>(nodeElements.size());
    for (int index = 0; index < nodeElements.size(); index++)
        nodes.add(new ContainerServiceBuilder("container." + index, index)
                          .build(deployState, cluster, nodeElements.get(index)));
    return nodes;
}
/** Returns whether the nodes element enables CPU socket affinity; absent attribute means false. */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    // Collapsed from an if/else: parseBoolean of a missing attribute's "" is also false,
    // but the explicit hasAttribute guard keeps the intent obvious.
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
            && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the given JVM options to each container that has none assigned already. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Applies the 'preload' attribute of the nodes element, when present, to every container. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if (nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) {
        String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
        containers.forEach(container -> container.setPreLoad(preload));
    }
}
/** Registers the search execution factory and the search handler with the cluster. */
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
    ProcessingHandler<?> executionFactory =
            new ProcessingHandler<>(cluster.getSearch().getChains(),
                                    "com.yahoo.search.searchchain.ExecutionFactory");
    cluster.addComponent(executionFactory);

    SearchHandler searchHandler =
            new SearchHandler(cluster,
                              serverBindings(searchElement, SearchHandler.DEFAULT_BINDING),
                              ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null));
    cluster.addComponent(searchHandler);
}
/** Registers the query-builder GUI handler on its standard binding path. */
private void addGUIHandler(ApplicationContainerCluster cluster) {
    Handler<?> handler = new GUIHandler();
    handler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH));
    cluster.addComponent(handler);
}
/** Returns the user-declared &lt;binding&gt; patterns of the element, or the given defaults when none are declared. */
private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... defaultBindings) {
    List<Element> bindingElements = XML.getChildren(searchElement, "binding");
    return bindingElements.isEmpty() ? List.of(defaultBindings) : toBindingList(bindingElements);
}
/** Converts binding elements to binding patterns, skipping elements with blank content. */
private List<BindingPattern> toBindingList(List<Element> bindingElements) {
    return bindingElements.stream()
                          .map(element -> element.getTextContent().trim())
                          .filter(text -> ! text.isEmpty())
                          .map(UserBindingPattern::fromPattern)
                          .collect(Collectors.toList());
}
/** Builds the document API model from the &lt;document-api&gt; element, or returns null when absent. */
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    return new ContainerDocumentApi(cluster, DocumentApiOptionsBuilder.build(documentApiElement));
}
/** Builds the document processing model from the &lt;document-processing&gt; element, or returns null when absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;
    addIncludes(docprocElement); // expand <include dir="..."> before building chains
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    return new ContainerDocproc(cluster, chains, DocprocOptionsBuilder.build(docprocElement), ! standaloneBuilder);
}
/** Expands all &lt;include&gt; children of the element in place; requires an application package. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    for (Element include : includes)
        addInclude(parentElement, include);
}
/** Appends the child elements of every XML file in the include's directory to the parent element. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    for (Element includedFile : Xml.allElemsFromPath(app, dirName)) {
        for (Element child : XML.getChildren(includedFile)) {
            // importNode(deep=true) copies the node into this document so it can be appended
            Node imported = parentElement.getOwnerDocument().importNode(child, true);
            parentElement.appendChild(imported);
        }
    }
}
/** Builds and registers every child element with the given component name. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element spec, String componentName) {
    XML.getChildren(spec, componentName)
       .forEach(element -> cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, element)));
}
/** Like addConfiguredComponents, but runs the given validator on each element first (validator throws on invalid). */
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec, String componentName,
                                                       Consumer<Element> elementValidator) {
    for (Element element : XML.getChildren(spec, componentName)) {
        elementValidator.accept(element);
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, element));
    }
}
// Configures the Athenz identity provider when the deployment spec declares an Athenz domain.
// Throws when a domain is declared but no Athenz service can be resolved for this instance/zone.
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
// Prefer the zone-specific service of this instance, fall back to the deployment-wide service
AthenzService service = spec.instance(app.getApplicationId().instance())
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> spec.athenzService())
.orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
cluster.addComponent(identityProvider);
// Expose the identity to each container as service properties
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
/** Returns the given load balancer name, or the first config server's hostname ("unknown" if none) as fallback. */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.from(fallback);
}
/** Returns whether the spec contains a &lt;zookeeper&gt; element. */
private static boolean hasZooKeeper(Element spec) {
    Element zookeeperElement = XML.getChild(spec, "zookeeper");
    return zookeeperElement != null;
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" — those ids are reserved for the built-in renderers. */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    if (xmlRendererId.equals(id) || jsonRendererId.equals(id))
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
/** Returns whether the element is a container cluster tag ('container' or the deprecated 'jdisc'). */
public static boolean isContainerTag(Element element) {
    String tag = element.getTagName();
    return CONTAINER_TAG.equals(tag) || DEPRECATED_CONTAINER_TAG.equals(tag);
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
// Load balancer status file on hosted nodes; its location is exported via the setting below.
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
// Tag names handled by this builder; 'jdisc' is the deprecated alias for 'container'.
private static final String CONTAINER_TAG = "container";
private static final String DEPRECATED_CONTAINER_TAG = "jdisc";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
// Embedded ZooKeeper clusters require an odd node count within [min, max].
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
// Whether the built cluster should open network (http) ports.
public enum Networking { disable, enable }
// Set per build in doBuild; the application package being deployed.
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
// Set per build in doBuild; logger for deploy-time messages.
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG));
// Renderer ids reserved for the built-in renderers; user renderers must not reuse them.
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * Creates a container model builder.
 *
 * @param standaloneBuilder whether this builds a standalone container (disables rpc server and message bus)
 * @param networking        whether the built cluster should open network (http) ports
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the services.xml elements this builder handles: 'container' and the deprecated 'jdisc'. */
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
/** Builds the container model from a 'container'/'jdisc' element: validates, creates the cluster, fills in content. */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
// Captured as fields for use by the helper methods below
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
checkTagName(spec, log);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
// Standalone containers run no rpc server / message bus; networking controls the http server
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
// Creates the (empty) cluster config producer for this element; its content is filled in by addClusterContent.
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
// The producer id doubles as both name and config id of the cluster
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
// Fills in all cluster content from the services.xml element: components, APIs, handlers,
// http setup, nodes, and hosted-specific features.
// NOTE(review): the call order below appears significant (e.g. nodes are added after http
// and handlers, ZooKeeper setup after nodes) — preserve it when modifying.
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec, deployState);
addServlets(deployState, spec, cluster);
addModelEvaluation(spec, cluster, context);
addModelEvaluationBundles(cluster);
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addDocproc(deployState, spec, cluster);
addDocumentApi(spec, cluster);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addRoutingAliases(cluster, spec, deployState.zone().environment());
addNodes(cluster, spec, context);
addClientProviders(deployState, spec, cluster);
addServerProviders(deployState, spec, cluster);
addAthensCopperArgos(cluster, context);  // Must be added after nodes.
addZooKeeper(cluster, spec);
addParameterStoreValidationHandler(cluster, deployState);
}
/** Adds the jdisc-cloud-aws bundle on hosted systems, and the secret-store validation handler in public systems. */
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
    if (deployState.isHosted()) {
        cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
    }
    if ( ! deployState.zone().system().isPublic()) return;
    Handler<AbstractConfigProducer<?>> validationHandler = new Handler<>(
            new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
    validationHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/validate-secret-store"));
    cluster.addComponent(validationHandler);
}
/** Sets up an embedded, reconfigurable ZooKeeper cluster when services.xml contains a &lt;zookeeper&gt; element. */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
    if ( ! hasZooKeeper(spec)) return;

    Element nodesElement = XML.getChild(spec, "nodes");
    boolean isCombinedCluster = nodesElement != null && nodesElement.hasAttribute("of");
    if (isCombinedCluster)
        throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");

    long nonRetiredNodes = cluster.getContainers().stream().filter(node -> ! node.isRetired()).count();
    boolean oddAndInRange = nonRetiredNodes % 2 == 1
                            && nonRetiredNodes >= MIN_ZOOKEEPER_NODE_COUNT
                            && nonRetiredNodes <= MAX_ZOOKEEPER_NODE_COUNT;
    if ( ! oddAndInRange)
        throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
                                           MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
                                           ", have " + nonRetiredNodes + " non-retired");

    cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
    cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
/** Adds the components a container needs to run a reconfigurable ZooKeeper server. */
public static void addReconfigurableZooKeeperServerComponents(Container container) {
    for (String className : List.of("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
                                    "com.yahoo.vespa.zookeeper.Reconfigurer",
                                    "com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl"))
        container.addComponent(zookeeperComponent(className, container));
}
/** Creates a zookeeper-server bundle component with the given class id, configured by the container's config id. */
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
    return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", container.getConfigId()));
}
/** Configures the secret store from the &lt;secret-store&gt; element: cloud-typed stores are delegated, others use groups. */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;
    if ("cloud".equals(secretStoreElement.getAttribute("type"))) {
        addCloudSecretStore(cluster, secretStoreElement, deployState);
        return;
    }
    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
// Hosted only: configures the Athenz identity provider and rotation ("global endpoint")
// properties from the application's deployment.xml, when one is present.
private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) {
if ( ! context.getDeployState().isHosted()) return;
app.getDeployment().map(DeploymentSpec::fromXml)
.ifPresent(deploymentSpec -> {
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec);
});
}
/** Sets the rotation-related service properties ('rotations', 'activeRotation') on each container. */
private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
    String activeRotation = Boolean.toString(zoneHasActiveRotation(zone, spec)); // same for every container
    for (Container container : cluster.getContainers()) {
        setRotations(container, endpoints, cluster.getName());
        container.setProp("activeRotation", activeRotation);
    }
}
/**
 * Returns whether this application instance's deployment spec declares the given zone,
 * and declares it as active (i.e. included in global rotations). False when the spec has
 * no entry for this instance.
 */
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance());
if (instance.isEmpty()) return false;
return instance.get().zones().stream()
.anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
declaredZone.active());
}
/**
 * Sets the 'rotations' service property on the container to a comma-separated list of the
 * endpoint names belonging to the given cluster. Join order follows the set's iteration
 * order and is therefore unspecified.
 */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
var rotationsProperty = endpoints.stream()
.filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
.flatMap(endpoint -> endpoint.names().stream())
.collect(Collectors.toUnmodifiableSet());
container.setProp("rotations", String.join(",", rotationsProperty));
}
/**
 * Collects service and endpoint aliases from the &lt;aliases&gt; element into the cluster.
 * Only applied in the prod environment; a missing &lt;aliases&gt; element yields no children.
 */
private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
if (environment != Environment.prod) return;
Element aliases = XML.getChild(spec, "aliases");
for (Element alias : XML.getChildren(aliases, "service-alias")) {
cluster.serviceAliases().add(XML.getValue(alias));
}
for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
cluster.endpointAliases().add(XML.getValue(alias));
}
}
/**
 * Adds &lt;component&gt; children found under each &lt;components&gt; element (with includes
 * expanded first) as well as &lt;component&gt; children directly under the cluster spec.
 */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
for (Element components : XML.getChildren(spec, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, spec, "component");
}
/**
 * Adds status handlers: on hosted Vespa a file-backed handler serving /status.html (the file
 * path may be overridden via the environment variable named by HOSTED_VESPA_STATUS_FILE_SETTING),
 * otherwise the VIP handler.
 */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(
name + "-status-handler",
statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
SystemBindingPattern.fromHttpPath("/" + name)));
} else {
cluster.addVipHandler();
}
}
/** Adds a client provider component for each <client> element in the cluster spec. */
private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    for (Element clientElement : XML.getChildren(spec, "client")) {
        cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientElement));
    }
}
/** Adds a component for each &lt;server&gt; element in the cluster spec. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
/**
 * Adds access logging: one component per &lt;accesslog&gt; element (unless that element disables
 * logging), or the default search access log when no elements are given and access logging is
 * enabled by default. A file connection log is added whenever any access log component is present.
 */
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
}
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
cluster.addDefaultSearchAccessLog();
if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent)) {
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "qrs"));
}
}
/** Returns all &lt;accesslog&gt; children of the given spec element. */
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
/**
 * Builds the http model from the &lt;http&gt; element when present. For hosted tenant
 * applications, additionally ensures the implicit http server and access control exist and
 * adds the default connector filter binding plus the extra hosted TLS connector.
 */
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement));
}
if (isHostedTenantApplication(context)) {
addHostedImplicitHttpIfNotPresent(cluster);
addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
addDefaultConnectorHostedFilterBinding(cluster);
addAdditionalHostedConnector(deployState, cluster, context);
}
}
/**
 * Lets the configured access control (if any) also cover the default hosted connector.
 * Fix: removed a stray empty statement ("; ;") left at the end of the expression.
 */
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
    cluster.getHttp().getAccessControl()
           .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp()));
}
/**
 * Adds the hosted TLS connector to the cluster's http server: the provisioned endpoint
 * certificate is used when present (with client authorization in public systems, or handshake
 * client auth when access control requires it); otherwise the default certificate and
 * truststore are used. The access control setup, if any, is also applied to this connector.
 */
private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) {
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
HostedSslConnectorFactory connectorFactory;
Collection<String> tlsCiphersOverride = deployState.getProperties().tlsCiphersOverride();
Duration maxConnectionLife = Duration.ofSeconds(deployState.featureFlags().maxConnectionLifeInHosted());
if (deployState.endpointCertificateSecrets().isPresent()) {
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
// NOTE(review): the next line appears truncated in this copy (unterminated string literal,
// URL cut after "https:") — restore the full message from the original source.
throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https:
}
EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get();
boolean enforceHandshakeClientAuth = cluster.getHttp().getAccessControl()
.map(accessControl -> accessControl.clientAuthentication)
.map(clientAuth -> clientAuth.equals(AccessControl.ClientAuthentication.need))
.orElse(false);
connectorFactory = authorizeClient
? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(
serverName, endpointCertificateSecrets, getTlsClientAuthorities(deployState), tlsCiphersOverride, maxConnectionLife)
: HostedSslConnectorFactory.withProvidedCertificate(
serverName, endpointCertificateSecrets, enforceHandshakeClientAuth, tlsCiphersOverride, maxConnectionLife);
} else {
connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName, tlsCiphersOverride, maxConnectionLife);
}
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
}
/**
 * Returns trusted certificates as a PEM encoded string containing the concatenation of
 * trusted certs from the application package (security/clients.pem) and all operator
 * certificates. An absent application authority yields only the operator certificates.
 */
String getTlsClientAuthorities(DeployState deployState) {
List<X509Certificate> trustedCertificates = deployState.tlsClientAuthority()
.map(X509CertificateUtils::certificateListFromPem)
.orElse(Collections.emptyList());
ArrayList<X509Certificate> x509Certificates = new ArrayList<>(trustedCertificates);
x509Certificates.addAll(deployState.getProperties().operatorCertificates());
return X509CertificateUtils.toPem(x509Certificates);
}
/** A hosted tenant application is a hosted, default-type application that is not a tester instance. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    if ( ! deployState.isHosted()) return false;
    if (context.getApplicationType() != ApplicationType.DEFAULT) return false;
    return ! deployState.getProperties().applicationId().instance().isTester();
}
/**
 * Ensures the cluster has a complete http setup: an http model, a Jetty server, and a
 * connector on the default Vespa web service port. Each is created only when missing, so
 * explicit user configuration is preserved.
 */
private static void addHostedImplicitHttpIfNotPresent(ApplicationContainerCluster cluster) {
if (cluster.getHttp() == null) {
cluster.setHttp(new Http(new FilterChains(cluster)));
}
JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null);
if (httpServer == null) {
httpServer = new JettyHttpServer("DefaultHttpServer", cluster, cluster.isHostedVespa());
cluster.getHttp().setHttpServer(httpServer);
}
int defaultPort = Defaults.getDefaults().vespaWebServicePort();
boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort);
if (!defaultConnectorPresent) {
httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
}
/**
 * Adds a default locked-down access control setup (read and write disabled, client certificate
 * required) for the tenant's Athenz domain. No-op when access control is already configured
 * or when no Athenz domain is set for the deployment.
 */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
Http http = cluster.getHttp();
if (http.getAccessControl().isPresent()) return;
AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
if (tenantDomain == null) return;
new AccessControl.Builder(tenantDomain.value())
.setHandlers(cluster)
.readEnabled(false)
.writeEnabled(false)
.clientAuthentication(AccessControl.ClientAuthentication.need)
.build()
.configureHttpFilterChains(http);
}
/**
 * Builds the http model from the &lt;http&gt; element, removing all servers when networking
 * is disabled (standalone/test usage).
 */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
/** Adds a servlet for each <servlet> element in the cluster spec. */
private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    for (Element servletElement : XML.getChildren(spec, "servlet")) {
        cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElement));
    }
}
/** Builds and attaches the document API model when <document-api> is present in the spec. */
private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
    ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/**
 * Builds and attaches the document processing model when &lt;document-processing&gt; is present,
 * and propagates its tuning options into the cluster's message bus parameters.
 */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/**
 * Builds the search model from the &lt;search&gt; element: expands includes, builds search
 * chains, adds the search and GUI handlers, and adds configured renderers after validating
 * that reserved renderer ids are not used.
 */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(cluster, searchElement);
addGUIHandler(cluster);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Enables stateless model evaluation when &lt;model-evaluation&gt; is present. Any per-model
 * ONNX overrides (execution-mode, interop-threads, intraop-threads) from the spec are applied
 * to the corresponding models in the rank profile list before the evaluation component is added.
 */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = profiles.getOnnxModels().get(modelElement.getAttribute("name"));
// overrides for model names not known to the rank profiles are silently skipped
if (onnxModel == null)
continue;
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles));
}
/** Returns the text content of the named child element, or defaultValue when the child is absent. */
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}
/** Returns the text content of the named child element parsed as an int, or defaultValue when absent. */
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
/** Makes the model-evaluation platform bundles available to the cluster. */
protected void addModelEvaluationBundles(ApplicationContainerCluster cluster) {
/* These bundles are added to all application container clusters, even if they haven't
* declared 'model-evaluation' in services.xml, because there are many public API packages
* in the model-evaluation bundle that could be used by customer code. */
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
}
/**
 * Builds the processing model from the &lt;processing&gt; element: expands includes, builds
 * processing chains bound to the configured (or default) bindings, and adds configured
 * renderers after validating reserved renderer ids.
 */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Builds the container search model: search chains from the spec, plus query profiles,
 * semantic rules and page templates taken from the application package.
 */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder(null, false)
.build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
/** Validates the application package's page templates and applies them to the search model. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Adds a handler component for each <handler> element in the cluster spec. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    for (Element handlerElement : XML.getChildren(spec, "handler"))
        cluster.addComponent(new DomHandlerBuilder(cluster).build(deployState, cluster, handlerElement));
}
/** Throws unless the 'version' attribute of the spec element denotes version 1.0. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if (Version.fromString(version).equals(new Version(1))) return;
    throw new RuntimeException("Expected container version to be 1.0, but got " + version);
}
/** Logs a deployment warning when the deprecated container tag name is used. */
private void checkTagName(Element spec, DeployLogger logger) {
    if ( ! spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) return;
    logger.logApplicationPackage(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead.");
}
/** Adds container nodes: a single implicit node in standalone mode, otherwise nodes from services.xml. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster, context.getDeployState());
    } else {
        addNodesFromXml(cluster, spec, context);
    }
}
/** Adds a single container named "standalone" for standalone (non-hosted) setups. */
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), deployState);
cluster.addContainers(Collections.singleton(container));
}
/**
 * Returns whether the given jvmargs contain options that select a GC algorithm
 * (-XX:[+-]Use...GC) or tune CMS (-XX:...CMS...), which conflict with managed GC settings.
 */
static boolean incompatibleGCOptions(String jvmargs) {
    boolean selectsGcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC").matcher(jvmargs).find();
    boolean tunesCms = Pattern.compile("-XX:[-+]*CMS").matcher(jvmargs).find();
    return selectsGcAlgorithm || tunesCms;
}
/**
 * Resolves the JVM GC options to use: explicit options win, then the deployment property,
 * then the hosted default (CMS) or the self-hosted default (G1).
 * Fix: corrected the misspelled parameter name 'jvmGCOPtions' and flattened the nested ternaries.
 *
 * @param jvmGcOptions GC options from services.xml, or null when not specified
 */
private static String buildJvmGCOptions(DeployState deployState, String jvmGcOptions) {
    String options = (jvmGcOptions != null)
            ? jvmGcOptions
            : deployState.getProperties().jvmGCOptions();
    if (options != null && ! options.isEmpty()) return options;
    return deployState.isHosted() ? ContainerCluster.CMS : ContainerCluster.G1GC;
}
/**
 * Returns the JVM options given on the &lt;nodes&gt; element. Specifying both jvm-options and
 * the deprecated jvmargs attribute is an error. When only jvmargs is given and it contains
 * GC-related options, a warning is logged and the cluster's GC options are forced to G1.
 */
private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
String jvmOptions;
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
" and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
}
} else {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
if (incompatibleGCOptions(jvmOptions)) {
deployLogger.logApplicationPackage(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
cluster.setJvmGCOptions(ContainerCluster.G1GC);
}
}
return jvmOptions;
}
/** Returns the given attribute of the element, or null when the attribute is not present. */
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
/**
 * Applies JVM settings given as attributes directly on the legacy &lt;nodes&gt; tag:
 * jvm options/args, GC options (only when the cluster has none yet), and allocated
 * memory percentage.
 */
void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions));
}
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
}
/** Applies JVM settings from the dedicated &lt;jvm&gt; element: options, allocated memory and GC options. */
void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element jvmElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS));
applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions));
}
/**
 * Add nodes to cluster according to the given containerElement.
 *
 * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
 * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
 * simultaneously for all active config models.
 */
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
// no <nodes> element: allocate implicitly (hosted) or use a single node (self-hosted)
if (nodesElement == null) {
cluster.addContainers(allocateWithoutNodesTag(cluster, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
// JVM settings come either from the modern <jvm> child or from legacy attributes on <nodes>
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) {
extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
} else {
extractJvmTag(nodes, cluster, jvmElement, context);
}
applyRoutingAliasProperties(nodes, cluster);
applyDefaultPreload(nodes, nodesElement);
String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
if (!environmentVars.isEmpty()) {
cluster.setEnvironmentVars(environmentVars);
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
/**
 * Flattens the children of the environment-variables element into a single string of
 * "NAME=value " entries (note: each entry, including the last, is followed by a space,
 * and values are not quoted or escaped — NOTE(review): verify consumers tolerate values
 * containing spaces). Returns the empty string when the element is null or has no children.
 */
private static String getEnvironmentVariables(Element environmentVariables) {
StringBuilder sb = new StringBuilder();
if (environmentVariables != null) {
for (Element var: XML.getChildren(environmentVariables)) {
sb.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' ');
}
}
return sb.toString();
}
/** Dispatches on the attributes of the <nodes> element to decide how nodes are provisioned. */
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("type"))
        return createNodesFromNodeType(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("of")) // running on the nodes of a content cluster
        return createNodesFromContentServiceReference(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("count"))
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement); // explicit <node> list
}
/**
 * Propagates the cluster's service and endpoint aliases to each container as the
 * 'servicealiases'/'endpointaliases' service properties (comma-separated).
 * Fix: join each alias list once with String.join instead of re-running a
 * stream-collect join for every container.
 */
private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) {
    if ( ! cluster.serviceAliases().isEmpty()) {
        String serviceAliases = String.join(",", cluster.serviceAliases());
        result.forEach(container -> container.setProp("servicealiases", serviceAliases));
    }
    if ( ! cluster.endpointAliases().isEmpty()) {
        String endpointAliases = String.join(",", cluster.endpointAliases());
        result.forEach(container -> container.setProp("endpointaliases", endpointAliases));
    }
}
/**
 * Parses a memory percentage string such as " 12% " and applies it to the cluster.
 * No-op when the value is null or empty.
 * Fixes: the identical error message was built in two places (now built once), and the
 * NumberFormatException is now preserved as the cause of the thrown exception.
 *
 * @throws IllegalArgumentException if the value is not an integer percentage ending with '%'
 */
private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    String errorMessage = "The memory percentage given for nodes in " + cluster +
                          " must be an integer percentage ending by the '%' sign";
    memoryPercentage = memoryPercentage.trim();
    if ( ! memoryPercentage.endsWith("%"))
        throw new IllegalArgumentException(errorMessage);
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException(errorMessage, e);
    }
}
/**
 * Allocates a container cluster without a nodes tag: in hosted environments hosts are
 * requested from the host system (2 nodes in production, otherwise 1, with unspecified
 * resources); in self-hosted setups the single-node host is used.
 */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.hostSystem();
if (deployState.isHosted()) {
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(deployState.getWantedNodeVespaVersion())
.dockerImageRepository(deployState.getWantedDockerImageRepo())
.build();
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()),
false,
!deployState.getProperties().isBootstrap());
var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log);
return createNodesFromHosts(log, hosts, cluster, context.getDeployState());
}
else {
return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
}
}
/** Creates a single-container cluster ("container.0") placed on the given host. */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, context.getDeployState());
node.setHostResource(host);
node.initService(context.getDeployLogger());
return List.of(node);
}
/**
 * Provisions hosts according to the count/resources given on the &lt;nodes&gt; element and
 * creates one container per host. Whether the container element declares ZooKeeper is
 * passed along to provisioning.
 */
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log,
hasZooKeeper(containerElement));
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster, context.getDeployState());
}
/**
 * Allocates all hosts of the node type given by the 'type' attribute of the &lt;nodes&gt;
 * element and creates one container per host.
 */
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
.dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
.build();
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), log);
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster, context.getDeployState());
}
/**
 * Provisions this container cluster on the hosts of the content cluster referenced by the
 * 'of' attribute of the &lt;nodes&gt; element, recording that reference on the cluster.
 */
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodeSpecification;
try {
nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
}
String referenceId = nodesElement.getAttribute("of");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodeSpecification,
referenceId,
cluster.getRoot().hostSystem(),
context.getDeployLogger());
return createNodesFromHosts(context.getDeployLogger(), hosts, cluster, context.getDeployState());
}
/** Creates one ApplicationContainer per allocated host, named and indexed by its cluster membership. */
private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger,
                                                        Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> nodes = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        ApplicationContainer container = new ApplicationContainer(cluster,
                                                                  "container." + membership.index(),
                                                                  membership.retired(),
                                                                  membership.index(),
                                                                  deployState);
        container.setHostResource(host);
        container.initService(deployLogger);
        nodes.add(container);
    });
    return nodes;
}
/** Creates a container for each explicitly listed <node> element, indexed in document order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<ApplicationContainer> nodes = new ArrayList<>();
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    for (int index = 0; index < nodeElements.size(); index++) {
        nodes.add(new ContainerServiceBuilder("container." + index, index).build(deployState, cluster, nodeElements.get(index)));
    }
    return nodes;
}
/** Returns the cpu socket affinity attribute of the nodes element, or false when not set. */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the nodes-tag JVM args to each container that has no explicitly assigned options. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Applies the preload attribute of the <nodes> element, when present, to every container. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    for (Container container : containers)
        container.setPreLoad(preload);
}
/**
 * Adds the search execution factory and the search handler, bound to the bindings configured
 * in the search element (or the default search binding), with any user thread pool options.
 */
private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
"com.yahoo.search.searchchain.ExecutionFactory"));
cluster.addComponent(
new SearchHandler(
cluster,
serverBindings(searchElement, SearchHandler.DEFAULT_BINDING),
ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null)));
}
/** Adds the GUI handler on its system binding path. */
private void addGUIHandler(ApplicationContainerCluster cluster) {
    GUIHandler guiHandler = new GUIHandler();
    guiHandler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH));
    cluster.addComponent(guiHandler);
}
/** Returns the user-specified <binding> patterns, or the given defaults when none are specified. */
private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... defaultBindings) {
    List<Element> bindings = XML.getChildren(searchElement, "binding");
    return bindings.isEmpty() ? List.of(defaultBindings) : toBindingList(bindings);
}
/** Converts the non-blank text content of each <binding> element to a user binding pattern, in order. */
private List<BindingPattern> toBindingList(List<Element> bindingElements) {
    List<BindingPattern> result = new ArrayList<>();
    for (Element bindingElement : bindingElements) {
        String pattern = bindingElement.getTextContent().trim();
        if ( ! pattern.isEmpty())
            result.add(UserBindingPattern.fromPattern(pattern));
    }
    return result;
}
/** Builds the document API model from <document-api>, or returns null when the element is absent. */
private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    return new ContainerDocumentApi(cluster, DocumentApiOptionsBuilder.build(documentApiElement));
}
/**
 * Builds the document processing model from &lt;document-processing&gt;, or returns null when
 * the element is absent. Includes are expanded before the docproc chains are built.
 */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
Element docprocElement = XML.getChild(spec, "document-processing");
if (docprocElement == null)
return null;
addIncludes(docprocElement);
DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
}
/**
 * Expands all <include> directives directly under the given element, in place.
 * Requires an application package; includes without one are an error.
 */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    for (Element include : includes)
        addInclude(parentElement, include);
}
/**
 * Expands one &lt;include dir="..."&gt; directive: validates the directory against the
 * application package, reads every XML file in it, and imports each file's child elements
 * into parentElement (appended, in file order).
 */
private void addInclude(Element parentElement, Element include) {
String dirName = include.getAttribute(IncludeDirs.DIR);
app.validateIncludeDir(dirName);
List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
for (Element includedFile : includedFiles) {
List<Element> includedSubElements = XML.getChildren(includedFile);
for (Element includedSubElement : includedSubElements) {
// importNode is required because the included elements belong to a different Document
Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
parentElement.appendChild(copiedNode);
}
}
}
/** Adds a component to the cluster for each child of spec with the given element name. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element spec, String componentName) {
    XML.getChildren(spec, componentName)
       .forEach(node -> cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)));
}
/**
 * Validates each matching child element with the given validator (which throws on invalid
 * elements), then adds it to the cluster as a component.
 */
private static void validateAndAddConfiguredComponents(DeployState deployState,
ContainerCluster<? extends Container> cluster,
Element spec, String componentName,
Consumer<Element> elementValidator) {
for (Element node : XML.getChildren(spec, componentName)) {
elementValidator.accept(node);
cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
}
}
/**
 * Adds the Athenz identity provider component and identity service properties to all
 * containers, when the deployment spec declares an Athenz domain. The Athenz service is
 * resolved per zone from the instance spec, falling back to the spec-wide service; a
 * missing service configuration is an error.
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
AthenzService service = spec.instance(app.getApplicationId().instance())
.flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
.or(() -> spec.athenzService())
.orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone);
cluster.addComponent(identityProvider);
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
/** Returns the given load balancer name, falling back to the first config server's hostname, then "unknown". */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.from(fallback);
}
/** Returns whether the spec contains a &lt;zookeeper&gt; element. */
private static boolean hasZooKeeper(Element spec) {
return XML.getChild(spec, "zookeeper") != null;
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" — those ids are reserved for internal use. */
private static void validateRendererElement(Element element) {
    String idAttr = element.getAttribute("id");
    if ( ! idAttr.equals(xmlRendererId) && ! idAttr.equals(jsonRendererId)) return;
    throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
}
/** Returns whether the element is a container tag, under either its current or its deprecated name. */
public static boolean isContainerTag(Element element) {
    String tagName = element.getTagName();
    return CONTAINER_TAG.equals(tagName) || DEPRECATED_CONTAINER_TAG.equals(tagName);
}
} |
It is necessary I think as you get the document id with default. So it will access disk. | public void process(boolean validate, boolean documentsOnly) {
// Ensure a "default" summary class always exists.
// NOTE(review): per the accompanying review comment, the default summary
// contains the document id and must therefore be fetched from the document
// store — hence fromDisk=true. Confirm against the summary fetch path.
DocumentSummary defaultSummary = schema.getSummariesInThis().get("default");
if (defaultSummary == null) {
defaultSummary = new DocumentSummary("default", schema);
defaultSummary.setFromDisk(true);
schema.addSummary(defaultSummary);
}
// Derive implicit summary fields from every concrete field in the schema.
for (SDField field : schema.allConcreteFields()) {
collectSummaries(field, schema, validate);
}
// NOTE(review): purgeImplicits presumably drops implicit entries made
// redundant by explicit ones — confirm in DocumentSummary.
for (DocumentSummary documentSummary : schema.getSummaries().values()) {
documentSummary.purgeImplicits();
}
} | defaultSummary.setFromDisk(true); | public void process(boolean validate, boolean documentsOnly) {
DocumentSummary defaultSummary = schema.getSummariesInThis().get("default");
if (defaultSummary == null) {
defaultSummary = new DocumentSummary("default", schema);
defaultSummary.setFromDisk(true);
schema.addSummary(defaultSummary);
}
for (SDField field : schema.allConcreteFields()) {
collectSummaries(field, schema, validate);
}
for (DocumentSummary documentSummary : schema.getSummaries().values()) {
documentSummary.purgeImplicits();
}
} | class ImplicitSummaries extends Processor {
/** Creates this processor; all collaborators are held by the {@code Processor} superclass. */
public ImplicitSummaries(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
// Lets the field itself register which of its sources feed this summary field.
private void addSummaryFieldSources(SummaryField summaryField, SDField sdField) {
sdField.addSummaryFieldSources(summaryField);
}
/**
 * Derives the implicit summary fields for one concrete field and registers
 * them in the relevant document summary classes of the schema.
 * Mutates both the field (added summary fields, transforms) and the schema
 * (summary classes); statement order is significant.
 */
private void collectSummaries(SDField field , Schema schema, boolean validate) {
SummaryField addedSummaryField = null;
String fieldName = field.getName();
SummaryField fieldSummaryField = field.getSummaryField(fieldName);
// A summarying field without a summary field of its own name gets an
// implicit one, destined for the "default" summary class.
if (fieldSummaryField == null && field.doesSummarying()) {
fieldSummaryField = new SummaryField(fieldName, field.getDataType());
fieldSummaryField.setImplicit(true);
addSummaryFieldSources(fieldSummaryField, field);
fieldSummaryField.addDestination("default");
field.addSummaryField(fieldSummaryField);
addedSummaryField = fieldSummaryField;
}
// Register the field in each destination summary class that already exists.
if (fieldSummaryField != null) {
for (String dest : fieldSummaryField.getDestinations()) {
DocumentSummary summary = schema.getSummariesInThis().get(dest);
if (summary != null) {
summary.add(fieldSummaryField);
}
}
}
// A same-named attribute lets the implicit summary value be served from the
// attribute; prefetch attributes are additionally routed to attributeprefetch.
for (Attribute attribute : field.getAttributes().values()) {
if (attribute.getName().equals(fieldName)) {
if (addedSummaryField != null) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (attribute.isPrefetch()) {
addPrefetchAttribute(attribute, field, schema);
}
}
}
// Complex fields backed only by struct-field attributes get the combiner transform.
if (addedSummaryField != null && isComplexFieldWithOnlyStructFieldAttributes(field)) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
// Position attributes contribute their derived distance/position summary
// fields to the attributeprefetch summary class.
if (field.doesSummarying()) {
for (Attribute attribute : field.getAttributes().values()) {
if ( ! attribute.isPosition()) continue;
DocumentSummary attributePrefetchSummary = getOrCreateAttributePrefetchSummary(schema);
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getDistanceSummaryFieldName(fieldName)));
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getPositionSummaryFieldName(fieldName)));
}
}
// Finally route every summary field of this field to its destinations,
// validating each (isValid may throw when 'validate' is true).
for (SummaryField summaryField : field.getSummaryFields().values()) {
Attribute attribute = field.getAttributes().get(fieldName);
if (attribute != null && summaryField.getTransform() == SummaryTransform.NONE) {
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (isValid(summaryField, schema, validate)) {
addToDestinations(summaryField, schema);
}
}
}
/** Returns the schema's "attributeprefetch" summary class, registering a new one on first use. */
private DocumentSummary getOrCreateAttributePrefetchSummary(Schema schema) {
    DocumentSummary existing = schema.getSummariesInThis().get("attributeprefetch");
    if (existing != null) {
        return existing;
    }
    DocumentSummary created = new DocumentSummary("attributeprefetch", schema);
    schema.addSummary(created);
    return created;
}
/**
 * Adds the attribute to the "attributeprefetch" summary class, unless an
 * unconditional prefetch (null prefetch value) would collide with a dynamic
 * summary (snippeting/bolding) of the same name, in which case it is skipped.
 */
private void addPrefetchAttribute(Attribute attribute, SDField field, Schema schema) {
    String attributeName = attribute.getName();
    if (attribute.getPrefetchValue() == null) {
        // Dynamic transforms cannot be served from the attribute; bail out.
        SummaryField onField = field.getSummaryField(attributeName);
        if (onField != null && onField.getTransform().isDynamic()) return;
        SummaryField explicit = schema.getExplicitSummaryField(attributeName);
        if (explicit != null && explicit.getTransform().isDynamic()) return;
    }
    DocumentSummary prefetchSummary = getOrCreateAttributePrefetchSummary(schema);
    SummaryField prefetchField = new SummaryField(attributeName, attribute.getDataType());
    prefetchField.addSource(attributeName);
    prefetchField.addDestination("attributeprefetch");
    prefetchField.setTransform(SummaryTransform.ATTRIBUTE);
    prefetchSummary.add(prefetchField);
}
/**
 * Checks whether this summary field can be produced from the schema.
 *
 * @param summaryField the field to check
 * @param schema       the schema the field's sources must resolve against
 * @param validate     when true, hard inconsistencies throw; when false they
 *                     make the field silently invalid instead
 * @return true if the field should be routed to its destinations, false if it
 *         must be skipped (a deploy warning is logged where relevant)
 */
private boolean isValid(SummaryField summaryField, Schema schema, boolean validate) {
    if (summaryField.getTransform() == SummaryTransform.DISTANCE ||
        summaryField.getTransform() == SummaryTransform.POSITIONS) {
        // Positional transforms require exactly one source, and it must be an attribute.
        int sourceCount = summaryField.getSourceCount();
        if (validate && sourceCount != 1) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Expected 1 source field, got " + sourceCount + ".");
        }
        String sourceName = summaryField.getSingleSource();
        if (validate && schema.getAttribute(sourceName) == null) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Summary source attribute '" + sourceName + "' not found.");
        }
        return true;
    }
    String fieldName = summaryField.getSourceField();
    SDField sourceField = schema.getConcreteField(fieldName);
    if (sourceField == null) {
        if (validate)
            throw newProcessException(schema, summaryField, "Source field '" + fieldName + "' does not exist.");
        return false; // BUGFIX: previously fell through and dereferenced the null sourceField below
    }
    // Enum constants are singletons: compare with != (consistent with the sibling copy of this method).
    if ( ! sourceField.doesSummarying() &&
         summaryField.getTransform() != SummaryTransform.ATTRIBUTE &&
         summaryField.getTransform() != SummaryTransform.GEOPOS)
    {
        deployLogger.logApplicationPackage(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
                                           " is not creating a summary value in its indexing statement");
        return false;
    }
    // Dynamic summaries of an attributed field must fall back to the disk value; warn about it.
    if (summaryField.getTransform().isDynamic()
        && summaryField.getName().equals(sourceField.getName())
        && sourceField.doesAttributing()) {
        Attribute attribute = sourceField.getAttributes().get(sourceField.getName());
        if (attribute != null) {
            String destinations = "document summary 'default'";
            if (summaryField.getDestinations().size() > 0) {
                destinations = "document summaries " + summaryField.getDestinations();
            }
            deployLogger.logApplicationPackage(Level.WARNING,
                "Will fetch the disk summary value of " + sourceField + " in " + destinations +
                " since this summary field uses a dynamic summary value (snippet/bolding): Dynamic summaries and bolding " +
                "is not supported with summary values fetched from in-memory attributes yet. If you want to see partial updates " +
                "to this attribute, remove any bolding and dynamic snippeting from this field");
        }
    }
    return true;
}
/** Routes the summary field into each of its destinations, defaulting to "default" when none are named. */
private void addToDestinations(SummaryField summaryField, Schema schema) {
    if (summaryField.getDestinations().size() == 0) {
        addToDestination("default", summaryField, schema);
        return;
    }
    for (String destinationName : summaryField.getDestinations()) {
        addToDestination(destinationName, summaryField, schema);
    }
}
/**
 * Adds the summary field to the named summary class, creating the class on
 * demand; when the class already exists, the field is merged with any existing
 * field of the same name before being (re)added.
 */
private void addToDestination(String destinationName, SummaryField summaryField, Schema schema) {
    DocumentSummary destination = schema.getSummariesInThis().get(destinationName);
    if (destination != null) {
        SummaryField existing = destination.getSummaryField(summaryField.getName());
        destination.add(summaryField.mergeWith(existing));
        return;
    }
    destination = new DocumentSummary(destinationName, schema);
    schema.addSummary(destination);
    destination.add(summaryField);
}
} | class ImplicitSummaries extends Processor {
public ImplicitSummaries(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
private void addSummaryFieldSources(SummaryField summaryField, SDField sdField) {
sdField.addSummaryFieldSources(summaryField);
}
private void collectSummaries(SDField field , Schema schema, boolean validate) {
SummaryField addedSummaryField = null;
String fieldName = field.getName();
SummaryField fieldSummaryField = field.getSummaryField(fieldName);
if (fieldSummaryField == null && field.doesSummarying()) {
fieldSummaryField = new SummaryField(fieldName, field.getDataType());
fieldSummaryField.setImplicit(true);
addSummaryFieldSources(fieldSummaryField, field);
fieldSummaryField.addDestination("default");
field.addSummaryField(fieldSummaryField);
addedSummaryField = fieldSummaryField;
}
if (fieldSummaryField != null) {
for (String dest : fieldSummaryField.getDestinations()) {
DocumentSummary summary = schema.getSummariesInThis().get(dest);
if (summary != null) {
summary.add(fieldSummaryField);
}
}
}
for (Attribute attribute : field.getAttributes().values()) {
if (attribute.getName().equals(fieldName)) {
if (addedSummaryField != null) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (attribute.isPrefetch()) {
addPrefetchAttribute(attribute, field, schema);
}
}
}
if (addedSummaryField != null && isComplexFieldWithOnlyStructFieldAttributes(field)) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
if (field.doesSummarying()) {
for (Attribute attribute : field.getAttributes().values()) {
if ( ! attribute.isPosition()) continue;
DocumentSummary attributePrefetchSummary = getOrCreateAttributePrefetchSummary(schema);
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getDistanceSummaryFieldName(fieldName)));
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getPositionSummaryFieldName(fieldName)));
}
}
for (SummaryField summaryField : field.getSummaryFields().values()) {
Attribute attribute = field.getAttributes().get(fieldName);
if (attribute != null && summaryField.getTransform() == SummaryTransform.NONE) {
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (isValid(summaryField, schema, validate)) {
addToDestinations(summaryField, schema);
}
}
}
private DocumentSummary getOrCreateAttributePrefetchSummary(Schema schema) {
DocumentSummary summary = schema.getSummariesInThis().get("attributeprefetch");
if (summary == null) {
summary = new DocumentSummary("attributeprefetch", schema);
schema.addSummary(summary);
}
return summary;
}
private void addPrefetchAttribute(Attribute attribute, SDField field, Schema schema) {
if (attribute.getPrefetchValue() == null) {
SummaryField fieldSummaryField = field.getSummaryField(attribute.getName());
if (fieldSummaryField != null && fieldSummaryField.getTransform().isDynamic()) return;
SummaryField explicitSummaryField = schema.getExplicitSummaryField(attribute.getName());
if (explicitSummaryField != null && explicitSummaryField.getTransform().isDynamic()) return;
}
DocumentSummary summary = getOrCreateAttributePrefetchSummary(schema);
SummaryField attributeSummaryField = new SummaryField(attribute.getName(), attribute.getDataType());
attributeSummaryField.addSource(attribute.getName());
attributeSummaryField.addDestination("attributeprefetch");
attributeSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
summary.add(attributeSummaryField);
}
/**
 * Checks whether this summary field can be produced from the schema.
 *
 * @param summaryField the field to check
 * @param schema       the schema the field's sources must resolve against
 * @param validate     when true, hard inconsistencies throw; when false they
 *                     make the field silently invalid instead
 * @return true if the field should be routed to its destinations, false if it
 *         must be skipped (a deploy warning is logged where relevant)
 */
private boolean isValid(SummaryField summaryField, Schema schema, boolean validate) {
    if (summaryField.getTransform() == SummaryTransform.DISTANCE ||
        summaryField.getTransform() == SummaryTransform.POSITIONS) {
        // Positional transforms require exactly one source, and it must be an attribute.
        int sourceCount = summaryField.getSourceCount();
        if (validate && sourceCount != 1) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Expected 1 source field, got " + sourceCount + ".");
        }
        String sourceName = summaryField.getSingleSource();
        if (validate && schema.getAttribute(sourceName) == null) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Summary source attribute '" + sourceName + "' not found.");
        }
        return true;
    }
    String fieldName = summaryField.getSourceField();
    SDField sourceField = schema.getConcreteField(fieldName);
    if (sourceField == null) {
        if (validate)
            throw newProcessException(schema, summaryField, "Source field '" + fieldName + "' does not exist.");
        return false; // BUGFIX: previously fell through and dereferenced the null sourceField below
    }
    if ( ! sourceField.doesSummarying() &&
         summaryField.getTransform() != SummaryTransform.ATTRIBUTE &&
         summaryField.getTransform() != SummaryTransform.GEOPOS)
    {
        deployLogger.logApplicationPackage(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
                                           " is not creating a summary value in its indexing statement");
        return false;
    }
    // Dynamic summaries of an attributed field must fall back to the disk value; warn about it.
    if (summaryField.getTransform().isDynamic()
        && summaryField.getName().equals(sourceField.getName())
        && sourceField.doesAttributing()) {
        Attribute attribute = sourceField.getAttributes().get(sourceField.getName());
        if (attribute != null) {
            String destinations = "document summary 'default'";
            if (summaryField.getDestinations().size() > 0) {
                destinations = "document summaries " + summaryField.getDestinations();
            }
            deployLogger.logApplicationPackage(Level.WARNING,
                "Will fetch the disk summary value of " + sourceField + " in " + destinations +
                " since this summary field uses a dynamic summary value (snippet/bolding): Dynamic summaries and bolding " +
                "is not supported with summary values fetched from in-memory attributes yet. If you want to see partial updates " +
                "to this attribute, remove any bolding and dynamic snippeting from this field");
        }
    }
    return true;
}
private void addToDestinations(SummaryField summaryField, Schema schema) {
if (summaryField.getDestinations().size() == 0) {
addToDestination("default", summaryField, schema);
}
else {
for (String destinationName : summaryField.getDestinations())
addToDestination(destinationName, summaryField, schema);
}
}
private void addToDestination(String destinationName, SummaryField summaryField, Schema schema) {
DocumentSummary destination = schema.getSummariesInThis().get(destinationName);
if (destination == null) {
destination = new DocumentSummary(destinationName, schema);
schema.addSummary(destination);
destination.add(summaryField);
}
else {
SummaryField existingField= destination.getSummaryField(summaryField.getName());
SummaryField merged = summaryField.mergeWith(existingField);
destination.add(merged);
}
}
} |
attributeprefetch not dead yet..... | private DocumentSummary getOrCreateAttributePrefetchSummary(Schema schema) {
DocumentSummary summary = schema.getSummariesInThis().get("attributeprefetch");
if (summary == null) {
summary = new DocumentSummary("attributeprefetch", schema);
schema.addSummary(summary);
}
return summary;
} | DocumentSummary summary = schema.getSummariesInThis().get("attributeprefetch"); | private DocumentSummary getOrCreateAttributePrefetchSummary(Schema schema) {
DocumentSummary summary = schema.getSummariesInThis().get("attributeprefetch");
if (summary == null) {
summary = new DocumentSummary("attributeprefetch", schema);
schema.addSummary(summary);
}
return summary;
} | class ImplicitSummaries extends Processor {
public ImplicitSummaries(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
/**
 * Creates the implicit document summaries: ensures a "default" summary class
 * exists, derives implicit summary fields from every concrete field, and then
 * purges redundant implicit entries from all summary classes.
 *
 * @param validate      whether inconsistent summary definitions should throw
 * @param documentsOnly unused here; part of the Processor contract
 */
public void process(boolean validate, boolean documentsOnly) {
DocumentSummary defaultSummary = schema.getSummariesInThis().get("default");
if (defaultSummary == null) {
defaultSummary = new DocumentSummary("default", schema);
// NOTE(review): the default summary includes the document id and is
// therefore fetched from the document store (disk) — hence fromDisk=true.
defaultSummary.setFromDisk(true);
schema.addSummary(defaultSummary);
}
for (SDField field : schema.allConcreteFields()) {
collectSummaries(field, schema, validate);
}
for (DocumentSummary documentSummary : schema.getSummaries().values()) {
documentSummary.purgeImplicits();
}
}
private void addSummaryFieldSources(SummaryField summaryField, SDField sdField) {
sdField.addSummaryFieldSources(summaryField);
}
private void collectSummaries(SDField field , Schema schema, boolean validate) {
SummaryField addedSummaryField = null;
String fieldName = field.getName();
SummaryField fieldSummaryField = field.getSummaryField(fieldName);
if (fieldSummaryField == null && field.doesSummarying()) {
fieldSummaryField = new SummaryField(fieldName, field.getDataType());
fieldSummaryField.setImplicit(true);
addSummaryFieldSources(fieldSummaryField, field);
fieldSummaryField.addDestination("default");
field.addSummaryField(fieldSummaryField);
addedSummaryField = fieldSummaryField;
}
if (fieldSummaryField != null) {
for (String dest : fieldSummaryField.getDestinations()) {
DocumentSummary summary = schema.getSummariesInThis().get(dest);
if (summary != null) {
summary.add(fieldSummaryField);
}
}
}
for (Attribute attribute : field.getAttributes().values()) {
if (attribute.getName().equals(fieldName)) {
if (addedSummaryField != null) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (attribute.isPrefetch()) {
addPrefetchAttribute(attribute, field, schema);
}
}
}
if (addedSummaryField != null && isComplexFieldWithOnlyStructFieldAttributes(field)) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
if (field.doesSummarying()) {
for (Attribute attribute : field.getAttributes().values()) {
if ( ! attribute.isPosition()) continue;
DocumentSummary attributePrefetchSummary = getOrCreateAttributePrefetchSummary(schema);
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getDistanceSummaryFieldName(fieldName)));
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getPositionSummaryFieldName(fieldName)));
}
}
for (SummaryField summaryField : field.getSummaryFields().values()) {
Attribute attribute = field.getAttributes().get(fieldName);
if (attribute != null && summaryField.getTransform() == SummaryTransform.NONE) {
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (isValid(summaryField, schema, validate)) {
addToDestinations(summaryField, schema);
}
}
}
private void addPrefetchAttribute(Attribute attribute, SDField field, Schema schema) {
if (attribute.getPrefetchValue() == null) {
SummaryField fieldSummaryField = field.getSummaryField(attribute.getName());
if (fieldSummaryField != null && fieldSummaryField.getTransform().isDynamic()) return;
SummaryField explicitSummaryField = schema.getExplicitSummaryField(attribute.getName());
if (explicitSummaryField != null && explicitSummaryField.getTransform().isDynamic()) return;
}
DocumentSummary summary = getOrCreateAttributePrefetchSummary(schema);
SummaryField attributeSummaryField = new SummaryField(attribute.getName(), attribute.getDataType());
attributeSummaryField.addSource(attribute.getName());
attributeSummaryField.addDestination("attributeprefetch");
attributeSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
summary.add(attributeSummaryField);
}
private boolean isValid(SummaryField summaryField, Schema schema, boolean validate) {
if (summaryField.getTransform() == SummaryTransform.DISTANCE ||
summaryField.getTransform() == SummaryTransform.POSITIONS) {
int sourceCount = summaryField.getSourceCount();
if (validate && sourceCount != 1) {
throw newProcessException(schema.getName(), summaryField.getName(),
"Expected 1 source field, got " + sourceCount + ".");
}
String sourceName = summaryField.getSingleSource();
if (validate && schema.getAttribute(sourceName) == null) {
throw newProcessException(schema.getName(), summaryField.getName(),
"Summary source attribute '" + sourceName + "' not found.");
}
return true;
}
String fieldName = summaryField.getSourceField();
SDField sourceField = schema.getConcreteField(fieldName);
if (validate && sourceField == null) {
throw newProcessException(schema, summaryField, "Source field '" + fieldName + "' does not exist.");
}
if (! sourceField.doesSummarying() &&
! summaryField.getTransform().equals(SummaryTransform.ATTRIBUTE) &&
! summaryField.getTransform().equals(SummaryTransform.GEOPOS))
{
deployLogger.logApplicationPackage(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
" is not creating a summary value in its indexing statement");
return false;
}
if (summaryField.getTransform().isDynamic()
&& summaryField.getName().equals(sourceField.getName())
&& sourceField.doesAttributing()) {
Attribute attribute = sourceField.getAttributes().get(sourceField.getName());
if (attribute != null) {
String destinations = "document summary 'default'";
if (summaryField.getDestinations().size() >0) {
destinations = "document summaries " + summaryField.getDestinations();
}
deployLogger.logApplicationPackage(Level.WARNING,
"Will fetch the disk summary value of " + sourceField + " in " + destinations +
" since this summary field uses a dynamic summary value (snippet/bolding): Dynamic summaries and bolding " +
"is not supported with summary values fetched from in-memory attributes yet. If you want to see partial updates " +
"to this attribute, remove any bolding and dynamic snippeting from this field");
}
}
return true;
}
private void addToDestinations(SummaryField summaryField, Schema schema) {
if (summaryField.getDestinations().size() == 0) {
addToDestination("default", summaryField, schema);
}
else {
for (String destinationName : summaryField.getDestinations())
addToDestination(destinationName, summaryField, schema);
}
}
private void addToDestination(String destinationName, SummaryField summaryField, Schema schema) {
DocumentSummary destination = schema.getSummariesInThis().get(destinationName);
if (destination == null) {
destination = new DocumentSummary(destinationName, schema);
schema.addSummary(destination);
destination.add(summaryField);
}
else {
SummaryField existingField= destination.getSummaryField(summaryField.getName());
SummaryField merged = summaryField.mergeWith(existingField);
destination.add(merged);
}
}
} | class ImplicitSummaries extends Processor {
public ImplicitSummaries(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
DocumentSummary defaultSummary = schema.getSummariesInThis().get("default");
if (defaultSummary == null) {
defaultSummary = new DocumentSummary("default", schema);
defaultSummary.setFromDisk(true);
schema.addSummary(defaultSummary);
}
for (SDField field : schema.allConcreteFields()) {
collectSummaries(field, schema, validate);
}
for (DocumentSummary documentSummary : schema.getSummaries().values()) {
documentSummary.purgeImplicits();
}
}
private void addSummaryFieldSources(SummaryField summaryField, SDField sdField) {
sdField.addSummaryFieldSources(summaryField);
}
private void collectSummaries(SDField field , Schema schema, boolean validate) {
SummaryField addedSummaryField = null;
String fieldName = field.getName();
SummaryField fieldSummaryField = field.getSummaryField(fieldName);
if (fieldSummaryField == null && field.doesSummarying()) {
fieldSummaryField = new SummaryField(fieldName, field.getDataType());
fieldSummaryField.setImplicit(true);
addSummaryFieldSources(fieldSummaryField, field);
fieldSummaryField.addDestination("default");
field.addSummaryField(fieldSummaryField);
addedSummaryField = fieldSummaryField;
}
if (fieldSummaryField != null) {
for (String dest : fieldSummaryField.getDestinations()) {
DocumentSummary summary = schema.getSummariesInThis().get(dest);
if (summary != null) {
summary.add(fieldSummaryField);
}
}
}
for (Attribute attribute : field.getAttributes().values()) {
if (attribute.getName().equals(fieldName)) {
if (addedSummaryField != null) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (attribute.isPrefetch()) {
addPrefetchAttribute(attribute, field, schema);
}
}
}
if (addedSummaryField != null && isComplexFieldWithOnlyStructFieldAttributes(field)) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
if (field.doesSummarying()) {
for (Attribute attribute : field.getAttributes().values()) {
if ( ! attribute.isPosition()) continue;
DocumentSummary attributePrefetchSummary = getOrCreateAttributePrefetchSummary(schema);
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getDistanceSummaryFieldName(fieldName)));
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getPositionSummaryFieldName(fieldName)));
}
}
for (SummaryField summaryField : field.getSummaryFields().values()) {
Attribute attribute = field.getAttributes().get(fieldName);
if (attribute != null && summaryField.getTransform() == SummaryTransform.NONE) {
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (isValid(summaryField, schema, validate)) {
addToDestinations(summaryField, schema);
}
}
}
private void addPrefetchAttribute(Attribute attribute, SDField field, Schema schema) {
if (attribute.getPrefetchValue() == null) {
SummaryField fieldSummaryField = field.getSummaryField(attribute.getName());
if (fieldSummaryField != null && fieldSummaryField.getTransform().isDynamic()) return;
SummaryField explicitSummaryField = schema.getExplicitSummaryField(attribute.getName());
if (explicitSummaryField != null && explicitSummaryField.getTransform().isDynamic()) return;
}
DocumentSummary summary = getOrCreateAttributePrefetchSummary(schema);
SummaryField attributeSummaryField = new SummaryField(attribute.getName(), attribute.getDataType());
attributeSummaryField.addSource(attribute.getName());
attributeSummaryField.addDestination("attributeprefetch");
attributeSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
summary.add(attributeSummaryField);
}
private boolean isValid(SummaryField summaryField, Schema schema, boolean validate) {
if (summaryField.getTransform() == SummaryTransform.DISTANCE ||
summaryField.getTransform() == SummaryTransform.POSITIONS) {
int sourceCount = summaryField.getSourceCount();
if (validate && sourceCount != 1) {
throw newProcessException(schema.getName(), summaryField.getName(),
"Expected 1 source field, got " + sourceCount + ".");
}
String sourceName = summaryField.getSingleSource();
if (validate && schema.getAttribute(sourceName) == null) {
throw newProcessException(schema.getName(), summaryField.getName(),
"Summary source attribute '" + sourceName + "' not found.");
}
return true;
}
String fieldName = summaryField.getSourceField();
SDField sourceField = schema.getConcreteField(fieldName);
if (validate && sourceField == null) {
throw newProcessException(schema, summaryField, "Source field '" + fieldName + "' does not exist.");
}
if (! sourceField.doesSummarying() &&
summaryField.getTransform() != SummaryTransform.ATTRIBUTE &&
summaryField.getTransform() != SummaryTransform.GEOPOS)
{
deployLogger.logApplicationPackage(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
" is not creating a summary value in its indexing statement");
return false;
}
if (summaryField.getTransform().isDynamic()
&& summaryField.getName().equals(sourceField.getName())
&& sourceField.doesAttributing()) {
Attribute attribute = sourceField.getAttributes().get(sourceField.getName());
if (attribute != null) {
String destinations = "document summary 'default'";
if (summaryField.getDestinations().size() >0) {
destinations = "document summaries " + summaryField.getDestinations();
}
deployLogger.logApplicationPackage(Level.WARNING,
"Will fetch the disk summary value of " + sourceField + " in " + destinations +
" since this summary field uses a dynamic summary value (snippet/bolding): Dynamic summaries and bolding " +
"is not supported with summary values fetched from in-memory attributes yet. If you want to see partial updates " +
"to this attribute, remove any bolding and dynamic snippeting from this field");
}
}
return true;
}
private void addToDestinations(SummaryField summaryField, Schema schema) {
if (summaryField.getDestinations().size() == 0) {
addToDestination("default", summaryField, schema);
}
else {
for (String destinationName : summaryField.getDestinations())
addToDestination(destinationName, summaryField, schema);
}
}
private void addToDestination(String destinationName, SummaryField summaryField, Schema schema) {
DocumentSummary destination = schema.getSummariesInThis().get(destinationName);
if (destination == null) {
destination = new DocumentSummary(destinationName, schema);
schema.addSummary(destination);
destination.add(summaryField);
}
else {
SummaryField existingField= destination.getSummaryField(summaryField.getName());
SummaryField merged = summaryField.mergeWith(existingField);
destination.add(merged);
}
}
} |
Somebody needs to work on that and the "default" nonsense if we're to make it in time for Vespa 8 ... | private DocumentSummary getOrCreateAttributePrefetchSummary(Schema schema) {
DocumentSummary summary = schema.getSummariesInThis().get("attributeprefetch");
if (summary == null) {
summary = new DocumentSummary("attributeprefetch", schema);
schema.addSummary(summary);
}
return summary;
} | DocumentSummary summary = schema.getSummariesInThis().get("attributeprefetch"); | private DocumentSummary getOrCreateAttributePrefetchSummary(Schema schema) {
DocumentSummary summary = schema.getSummariesInThis().get("attributeprefetch");
if (summary == null) {
summary = new DocumentSummary("attributeprefetch", schema);
schema.addSummary(summary);
}
return summary;
} | class ImplicitSummaries extends Processor {
public ImplicitSummaries(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
DocumentSummary defaultSummary = schema.getSummariesInThis().get("default");
if (defaultSummary == null) {
defaultSummary = new DocumentSummary("default", schema);
defaultSummary.setFromDisk(true);
schema.addSummary(defaultSummary);
}
for (SDField field : schema.allConcreteFields()) {
collectSummaries(field, schema, validate);
}
for (DocumentSummary documentSummary : schema.getSummaries().values()) {
documentSummary.purgeImplicits();
}
}
private void addSummaryFieldSources(SummaryField summaryField, SDField sdField) {
sdField.addSummaryFieldSources(summaryField);
}
private void collectSummaries(SDField field , Schema schema, boolean validate) {
SummaryField addedSummaryField = null;
String fieldName = field.getName();
SummaryField fieldSummaryField = field.getSummaryField(fieldName);
if (fieldSummaryField == null && field.doesSummarying()) {
fieldSummaryField = new SummaryField(fieldName, field.getDataType());
fieldSummaryField.setImplicit(true);
addSummaryFieldSources(fieldSummaryField, field);
fieldSummaryField.addDestination("default");
field.addSummaryField(fieldSummaryField);
addedSummaryField = fieldSummaryField;
}
if (fieldSummaryField != null) {
for (String dest : fieldSummaryField.getDestinations()) {
DocumentSummary summary = schema.getSummariesInThis().get(dest);
if (summary != null) {
summary.add(fieldSummaryField);
}
}
}
for (Attribute attribute : field.getAttributes().values()) {
if (attribute.getName().equals(fieldName)) {
if (addedSummaryField != null) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (attribute.isPrefetch()) {
addPrefetchAttribute(attribute, field, schema);
}
}
}
if (addedSummaryField != null && isComplexFieldWithOnlyStructFieldAttributes(field)) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
if (field.doesSummarying()) {
for (Attribute attribute : field.getAttributes().values()) {
if ( ! attribute.isPosition()) continue;
DocumentSummary attributePrefetchSummary = getOrCreateAttributePrefetchSummary(schema);
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getDistanceSummaryFieldName(fieldName)));
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getPositionSummaryFieldName(fieldName)));
}
}
for (SummaryField summaryField : field.getSummaryFields().values()) {
Attribute attribute = field.getAttributes().get(fieldName);
if (attribute != null && summaryField.getTransform() == SummaryTransform.NONE) {
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (isValid(summaryField, schema, validate)) {
addToDestinations(summaryField, schema);
}
}
}
/**
 * Adds a summary field for the given prefetch attribute to the "attributeprefetch"
 * summary. When the attribute has no prefetch value, it is skipped if a summary field
 * with the attribute's name (on the field, or explicit in the schema) uses a dynamic transform.
 */
private void addPrefetchAttribute(Attribute attribute, SDField field, Schema schema) {
    String attributeName = attribute.getName();
    if (attribute.getPrefetchValue() == null) {
        SummaryField onField = field.getSummaryField(attributeName);
        if (onField != null && onField.getTransform().isDynamic()) return;
        SummaryField explicit = schema.getExplicitSummaryField(attributeName);
        if (explicit != null && explicit.getTransform().isDynamic()) return;
    }
    SummaryField prefetchField = new SummaryField(attributeName, attribute.getDataType());
    prefetchField.addSource(attributeName);
    prefetchField.addDestination("attributeprefetch");
    prefetchField.setTransform(SummaryTransform.ATTRIBUTE);
    getOrCreateAttributePrefetchSummary(schema).add(prefetchField);
}
/**
 * Returns whether a summary field is valid and should be added to its destination summaries.
 *
 * @param summaryField the summary field to check
 * @param schema       the schema owning the field
 * @param validate     whether inconsistencies should throw rather than be skipped
 * @return true if the field should be added, false if it should be ignored
 * @throws RuntimeException (via newProcessException) on inconsistencies when validate is true
 */
private boolean isValid(SummaryField summaryField, Schema schema, boolean validate) {
    // DISTANCE/POSITIONS summary fields are derived from exactly one attribute source
    if (summaryField.getTransform() == SummaryTransform.DISTANCE ||
        summaryField.getTransform() == SummaryTransform.POSITIONS) {
        int sourceCount = summaryField.getSourceCount();
        if (validate && sourceCount != 1) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Expected 1 source field, got " + sourceCount + ".");
        }
        String sourceName = summaryField.getSingleSource();
        if (validate && schema.getAttribute(sourceName) == null) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Summary source attribute '" + sourceName + "' not found.");
        }
        return true;
    }
    String fieldName = summaryField.getSourceField();
    SDField sourceField = schema.getConcreteField(fieldName);
    // Fix: guard unconditionally — previously, when validate was false and the source
    // field was missing, the dereference below threw a NullPointerException.
    if (sourceField == null) {
        if (validate)
            throw newProcessException(schema, summaryField, "Source field '" + fieldName + "' does not exist.");
        return false;
    }
    if ( ! sourceField.doesSummarying() &&
         summaryField.getTransform() != SummaryTransform.ATTRIBUTE &&   // enums: compare by identity
         summaryField.getTransform() != SummaryTransform.GEOPOS) {
        deployLogger.logApplicationPackage(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
                                           " is not creating a summary value in its indexing statement");
        return false;
    }
    // Warn when a dynamic summary (snippet/bolding) forces fetching the disk value
    // of an attribute-backed field.
    if (summaryField.getTransform().isDynamic()
        && summaryField.getName().equals(sourceField.getName())
        && sourceField.doesAttributing()) {
        Attribute attribute = sourceField.getAttributes().get(sourceField.getName());
        if (attribute != null) {
            String destinations = "document summary 'default'";
            if (summaryField.getDestinations().size() > 0) {
                destinations = "document summaries " + summaryField.getDestinations();
            }
            deployLogger.logApplicationPackage(Level.WARNING,
                "Will fetch the disk summary value of " + sourceField + " in " + destinations +
                " since this summary field uses a dynamic summary value (snippet/bolding): Dynamic summaries and bolding " +
                "is not supported with summary values fetched from in-memory attributes yet. If you want to see partial updates " +
                "to this attribute, remove any bolding and dynamic snippeting from this field");
        }
    }
    return true;
}
/** Adds this summary field to each of its destination summaries, or to "default" if it has none. */
private void addToDestinations(SummaryField summaryField, Schema schema) {
    if (summaryField.getDestinations().isEmpty()) {
        addToDestination("default", summaryField, schema);
        return;
    }
    for (String destinationName : summaryField.getDestinations()) {
        addToDestination(destinationName, summaryField, schema);
    }
}
/**
 * Adds the summary field to the named document summary, creating and registering
 * the summary if it does not exist, and merging with any existing summary field
 * of the same name when it does.
 */
private void addToDestination(String destinationName, SummaryField summaryField, Schema schema) {
DocumentSummary destination = schema.getSummariesInThis().get(destinationName);
if (destination == null) {
destination = new DocumentSummary(destinationName, schema);
schema.addSummary(destination);
destination.add(summaryField);
}
else {
SummaryField existingField= destination.getSummaryField(summaryField.getName());
SummaryField merged = summaryField.mergeWith(existingField);
destination.add(merged);
}
}
} | class ImplicitSummaries extends Processor {
/** Creates this processor; all dependencies are passed on to the superclass. */
public ImplicitSummaries(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
public void process(boolean validate, boolean documentsOnly) {
// Ensure a "default" document summary exists before deriving per-field summary fields
DocumentSummary defaultSummary = schema.getSummariesInThis().get("default");
if (defaultSummary == null) {
defaultSummary = new DocumentSummary("default", schema);
defaultSummary.setFromDisk(true);
schema.addSummary(defaultSummary);
}
// Derive and distribute summary fields for every concrete field
for (SDField field : schema.allConcreteFields()) {
collectSummaries(field, schema, validate);
}
// Purge implicit entries from all document summaries
for (DocumentSummary documentSummary : schema.getSummaries().values()) {
documentSummary.purgeImplicits();
}
}
/** Delegates to the field to add its summary field sources to the given summary field. */
private void addSummaryFieldSources(SummaryField summaryField, SDField sdField) {
sdField.addSummaryFieldSources(summaryField);
}
/**
 * Derives the summary fields of one concrete field and distributes them to their
 * destination document summaries.
 *
 * @param field    the concrete field to derive summary fields for
 * @param schema   the schema owning the field
 * @param validate whether inconsistencies should cause processing exceptions
 */
private void collectSummaries(SDField field , Schema schema, boolean validate) {
SummaryField addedSummaryField = null;
String fieldName = field.getName();
SummaryField fieldSummaryField = field.getSummaryField(fieldName);
// Add an implicit summary field (to "default") when the field does summarying
// but declares none with its own name.
if (fieldSummaryField == null && field.doesSummarying()) {
fieldSummaryField = new SummaryField(fieldName, field.getDataType());
fieldSummaryField.setImplicit(true);
addSummaryFieldSources(fieldSummaryField, field);
fieldSummaryField.addDestination("default");
field.addSummaryField(fieldSummaryField);
addedSummaryField = fieldSummaryField;
}
// Register the summary field in each destination summary that already exists
if (fieldSummaryField != null) {
for (String dest : fieldSummaryField.getDestinations()) {
DocumentSummary summary = schema.getSummariesInThis().get(dest);
if (summary != null) {
summary.add(fieldSummaryField);
}
}
}
// An attribute with the field's name makes the implicit summary field attribute-backed;
// prefetched attributes are also registered in the "attributeprefetch" summary
for (Attribute attribute : field.getAttributes().values()) {
if (attribute.getName().equals(fieldName)) {
if (addedSummaryField != null) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (attribute.isPrefetch()) {
addPrefetchAttribute(attribute, field, schema);
}
}
}
// Complex fields with only struct field attributes use the attribute combiner transform
if (addedSummaryField != null && isComplexFieldWithOnlyStructFieldAttributes(field)) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
// Position attributes contribute derived distance/position summary fields
if (field.doesSummarying()) {
for (Attribute attribute : field.getAttributes().values()) {
if ( ! attribute.isPosition()) continue;
DocumentSummary attributePrefetchSummary = getOrCreateAttributePrefetchSummary(schema);
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getDistanceSummaryFieldName(fieldName)));
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getPositionSummaryFieldName(fieldName)));
}
}
// Validate each summary field of this field and add it to its destinations
for (SummaryField summaryField : field.getSummaryFields().values()) {
Attribute attribute = field.getAttributes().get(fieldName);
if (attribute != null && summaryField.getTransform() == SummaryTransform.NONE) {
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (isValid(summaryField, schema, validate)) {
addToDestinations(summaryField, schema);
}
}
}
/**
 * Adds a summary field for the given prefetch attribute to the "attributeprefetch"
 * summary. When the attribute has no prefetch value, it is skipped if a summary field
 * with the attribute's name (on the field, or explicit in the schema) uses a dynamic transform.
 */
private void addPrefetchAttribute(Attribute attribute, SDField field, Schema schema) {
if (attribute.getPrefetchValue() == null) {
SummaryField fieldSummaryField = field.getSummaryField(attribute.getName());
if (fieldSummaryField != null && fieldSummaryField.getTransform().isDynamic()) return;
SummaryField explicitSummaryField = schema.getExplicitSummaryField(attribute.getName());
if (explicitSummaryField != null && explicitSummaryField.getTransform().isDynamic()) return;
}
DocumentSummary summary = getOrCreateAttributePrefetchSummary(schema);
SummaryField attributeSummaryField = new SummaryField(attribute.getName(), attribute.getDataType());
attributeSummaryField.addSource(attribute.getName());
attributeSummaryField.addDestination("attributeprefetch");
attributeSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
summary.add(attributeSummaryField);
}
/**
 * Returns whether a summary field is valid and should be added to its destination summaries.
 *
 * @param summaryField the summary field to check
 * @param schema       the schema owning the field
 * @param validate     whether inconsistencies should throw rather than be skipped
 * @return true if the field should be added, false if it should be ignored
 * @throws RuntimeException (via newProcessException) on inconsistencies when validate is true
 */
private boolean isValid(SummaryField summaryField, Schema schema, boolean validate) {
    // DISTANCE/POSITIONS summary fields are derived from exactly one attribute source
    if (summaryField.getTransform() == SummaryTransform.DISTANCE ||
        summaryField.getTransform() == SummaryTransform.POSITIONS) {
        int sourceCount = summaryField.getSourceCount();
        if (validate && sourceCount != 1) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Expected 1 source field, got " + sourceCount + ".");
        }
        String sourceName = summaryField.getSingleSource();
        if (validate && schema.getAttribute(sourceName) == null) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Summary source attribute '" + sourceName + "' not found.");
        }
        return true;
    }
    String fieldName = summaryField.getSourceField();
    SDField sourceField = schema.getConcreteField(fieldName);
    // Fix: guard unconditionally — previously, when validate was false and the source
    // field was missing, the dereference below threw a NullPointerException.
    if (sourceField == null) {
        if (validate)
            throw newProcessException(schema, summaryField, "Source field '" + fieldName + "' does not exist.");
        return false;
    }
    if ( ! sourceField.doesSummarying() &&
         summaryField.getTransform() != SummaryTransform.ATTRIBUTE &&
         summaryField.getTransform() != SummaryTransform.GEOPOS) {
        deployLogger.logApplicationPackage(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
                                           " is not creating a summary value in its indexing statement");
        return false;
    }
    // Warn when a dynamic summary (snippet/bolding) forces fetching the disk value
    // of an attribute-backed field.
    if (summaryField.getTransform().isDynamic()
        && summaryField.getName().equals(sourceField.getName())
        && sourceField.doesAttributing()) {
        Attribute attribute = sourceField.getAttributes().get(sourceField.getName());
        if (attribute != null) {
            String destinations = "document summary 'default'";
            if (summaryField.getDestinations().size() > 0) {
                destinations = "document summaries " + summaryField.getDestinations();
            }
            deployLogger.logApplicationPackage(Level.WARNING,
                "Will fetch the disk summary value of " + sourceField + " in " + destinations +
                " since this summary field uses a dynamic summary value (snippet/bolding): Dynamic summaries and bolding " +
                "is not supported with summary values fetched from in-memory attributes yet. If you want to see partial updates " +
                "to this attribute, remove any bolding and dynamic snippeting from this field");
        }
    }
    return true;
}
/** Adds the summary field to all its destination summaries, or to "default" if it has none. */
private void addToDestinations(SummaryField summaryField, Schema schema) {
if (summaryField.getDestinations().size() == 0) {
addToDestination("default", summaryField, schema);
}
else {
for (String destinationName : summaryField.getDestinations())
addToDestination(destinationName, summaryField, schema);
}
}
/**
 * Adds the summary field to the named document summary, creating and registering
 * the summary if it does not exist, and merging with any existing summary field
 * of the same name when it does.
 */
private void addToDestination(String destinationName, SummaryField summaryField, Schema schema) {
    DocumentSummary destination = schema.getSummariesInThis().get(destinationName);
    if (destination != null) {
        SummaryField existing = destination.getSummaryField(summaryField.getName());
        destination.add(summaryField.mergeWith(existing));
        return;
    }
    destination = new DocumentSummary(destinationName, schema);
    schema.addSummary(destination);
    destination.add(summaryField);
}
} |
True | public void process(boolean validate, boolean documentsOnly) {
DocumentSummary defaultSummary = schema.getSummariesInThis().get("default");
if (defaultSummary == null) {
defaultSummary = new DocumentSummary("default", schema);
defaultSummary.setFromDisk(true);
schema.addSummary(defaultSummary);
}
for (SDField field : schema.allConcreteFields()) {
collectSummaries(field, schema, validate);
}
for (DocumentSummary documentSummary : schema.getSummaries().values()) {
documentSummary.purgeImplicits();
}
} | defaultSummary.setFromDisk(true); | public void process(boolean validate, boolean documentsOnly) {
DocumentSummary defaultSummary = schema.getSummariesInThis().get("default");
if (defaultSummary == null) {
defaultSummary = new DocumentSummary("default", schema);
defaultSummary.setFromDisk(true);
schema.addSummary(defaultSummary);
}
for (SDField field : schema.allConcreteFields()) {
collectSummaries(field, schema, validate);
}
for (DocumentSummary documentSummary : schema.getSummaries().values()) {
documentSummary.purgeImplicits();
}
} | class ImplicitSummaries extends Processor {
/** Creates this processor; all dependencies are passed on to the superclass. */
public ImplicitSummaries(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
/** Delegates to the field to add its summary field sources to the given summary field. */
private void addSummaryFieldSources(SummaryField summaryField, SDField sdField) {
sdField.addSummaryFieldSources(summaryField);
}
/**
 * Derives the summary fields of one concrete field and distributes them to their
 * destination document summaries.
 *
 * @param field    the concrete field to derive summary fields for
 * @param schema   the schema owning the field
 * @param validate whether inconsistencies should cause processing exceptions
 */
private void collectSummaries(SDField field , Schema schema, boolean validate) {
SummaryField addedSummaryField = null;
String fieldName = field.getName();
SummaryField fieldSummaryField = field.getSummaryField(fieldName);
// Add an implicit summary field (to "default") when the field does summarying
// but declares none with its own name.
if (fieldSummaryField == null && field.doesSummarying()) {
fieldSummaryField = new SummaryField(fieldName, field.getDataType());
fieldSummaryField.setImplicit(true);
addSummaryFieldSources(fieldSummaryField, field);
fieldSummaryField.addDestination("default");
field.addSummaryField(fieldSummaryField);
addedSummaryField = fieldSummaryField;
}
// Register the summary field in each destination summary that already exists
if (fieldSummaryField != null) {
for (String dest : fieldSummaryField.getDestinations()) {
DocumentSummary summary = schema.getSummariesInThis().get(dest);
if (summary != null) {
summary.add(fieldSummaryField);
}
}
}
// An attribute with the field's name makes the implicit summary field attribute-backed;
// prefetched attributes are also registered in the "attributeprefetch" summary
for (Attribute attribute : field.getAttributes().values()) {
if (attribute.getName().equals(fieldName)) {
if (addedSummaryField != null) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (attribute.isPrefetch()) {
addPrefetchAttribute(attribute, field, schema);
}
}
}
// Complex fields with only struct field attributes use the attribute combiner transform
if (addedSummaryField != null && isComplexFieldWithOnlyStructFieldAttributes(field)) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
// Position attributes contribute derived distance/position summary fields
if (field.doesSummarying()) {
for (Attribute attribute : field.getAttributes().values()) {
if ( ! attribute.isPosition()) continue;
DocumentSummary attributePrefetchSummary = getOrCreateAttributePrefetchSummary(schema);
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getDistanceSummaryFieldName(fieldName)));
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getPositionSummaryFieldName(fieldName)));
}
}
// Validate each summary field of this field and add it to its destinations
for (SummaryField summaryField : field.getSummaryFields().values()) {
Attribute attribute = field.getAttributes().get(fieldName);
if (attribute != null && summaryField.getTransform() == SummaryTransform.NONE) {
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (isValid(summaryField, schema, validate)) {
addToDestinations(summaryField, schema);
}
}
}
/** Returns the "attributeprefetch" document summary, creating and registering it first if missing. */
private DocumentSummary getOrCreateAttributePrefetchSummary(Schema schema) {
    DocumentSummary existing = schema.getSummariesInThis().get("attributeprefetch");
    if (existing != null) return existing;
    DocumentSummary created = new DocumentSummary("attributeprefetch", schema);
    schema.addSummary(created);
    return created;
}
/**
 * Adds a summary field for the given prefetch attribute to the "attributeprefetch"
 * summary. When the attribute has no prefetch value, it is skipped if a summary field
 * with the attribute's name (on the field, or explicit in the schema) uses a dynamic transform.
 */
private void addPrefetchAttribute(Attribute attribute, SDField field, Schema schema) {
if (attribute.getPrefetchValue() == null) {
SummaryField fieldSummaryField = field.getSummaryField(attribute.getName());
if (fieldSummaryField != null && fieldSummaryField.getTransform().isDynamic()) return;
SummaryField explicitSummaryField = schema.getExplicitSummaryField(attribute.getName());
if (explicitSummaryField != null && explicitSummaryField.getTransform().isDynamic()) return;
}
DocumentSummary summary = getOrCreateAttributePrefetchSummary(schema);
SummaryField attributeSummaryField = new SummaryField(attribute.getName(), attribute.getDataType());
attributeSummaryField.addSource(attribute.getName());
attributeSummaryField.addDestination("attributeprefetch");
attributeSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
summary.add(attributeSummaryField);
}
/**
 * Returns whether a summary field is valid and should be added to its destination summaries.
 *
 * @param summaryField the summary field to check
 * @param schema       the schema owning the field
 * @param validate     whether inconsistencies should throw rather than be skipped
 * @return true if the field should be added, false if it should be ignored
 * @throws RuntimeException (via newProcessException) on inconsistencies when validate is true
 */
private boolean isValid(SummaryField summaryField, Schema schema, boolean validate) {
    // DISTANCE/POSITIONS summary fields are derived from exactly one attribute source
    if (summaryField.getTransform() == SummaryTransform.DISTANCE ||
        summaryField.getTransform() == SummaryTransform.POSITIONS) {
        int sourceCount = summaryField.getSourceCount();
        if (validate && sourceCount != 1) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Expected 1 source field, got " + sourceCount + ".");
        }
        String sourceName = summaryField.getSingleSource();
        if (validate && schema.getAttribute(sourceName) == null) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Summary source attribute '" + sourceName + "' not found.");
        }
        return true;
    }
    String fieldName = summaryField.getSourceField();
    SDField sourceField = schema.getConcreteField(fieldName);
    // Fix: guard unconditionally — previously, when validate was false and the source
    // field was missing, the dereference below threw a NullPointerException.
    if (sourceField == null) {
        if (validate)
            throw newProcessException(schema, summaryField, "Source field '" + fieldName + "' does not exist.");
        return false;
    }
    if ( ! sourceField.doesSummarying() &&
         summaryField.getTransform() != SummaryTransform.ATTRIBUTE &&   // enums: compare by identity
         summaryField.getTransform() != SummaryTransform.GEOPOS) {
        deployLogger.logApplicationPackage(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
                                           " is not creating a summary value in its indexing statement");
        return false;
    }
    // Warn when a dynamic summary (snippet/bolding) forces fetching the disk value
    // of an attribute-backed field.
    if (summaryField.getTransform().isDynamic()
        && summaryField.getName().equals(sourceField.getName())
        && sourceField.doesAttributing()) {
        Attribute attribute = sourceField.getAttributes().get(sourceField.getName());
        if (attribute != null) {
            String destinations = "document summary 'default'";
            if (summaryField.getDestinations().size() > 0) {
                destinations = "document summaries " + summaryField.getDestinations();
            }
            deployLogger.logApplicationPackage(Level.WARNING,
                "Will fetch the disk summary value of " + sourceField + " in " + destinations +
                " since this summary field uses a dynamic summary value (snippet/bolding): Dynamic summaries and bolding " +
                "is not supported with summary values fetched from in-memory attributes yet. If you want to see partial updates " +
                "to this attribute, remove any bolding and dynamic snippeting from this field");
        }
    }
    return true;
}
/** Adds the summary field to all its destination summaries, or to "default" if it has none. */
private void addToDestinations(SummaryField summaryField, Schema schema) {
if (summaryField.getDestinations().size() == 0) {
addToDestination("default", summaryField, schema);
}
else {
for (String destinationName : summaryField.getDestinations())
addToDestination(destinationName, summaryField, schema);
}
}
/**
 * Adds the summary field to the named document summary, creating and registering
 * the summary if it does not exist, and merging with any existing summary field
 * of the same name when it does.
 */
private void addToDestination(String destinationName, SummaryField summaryField, Schema schema) {
DocumentSummary destination = schema.getSummariesInThis().get(destinationName);
if (destination == null) {
destination = new DocumentSummary(destinationName, schema);
schema.addSummary(destination);
destination.add(summaryField);
}
else {
SummaryField existingField= destination.getSummaryField(summaryField.getName());
SummaryField merged = summaryField.mergeWith(existingField);
destination.add(merged);
}
}
} | class ImplicitSummaries extends Processor {
/** Creates this processor; all dependencies are passed on to the superclass. */
public ImplicitSummaries(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
/** Delegates to the field to add its summary field sources to the given summary field. */
private void addSummaryFieldSources(SummaryField summaryField, SDField sdField) {
sdField.addSummaryFieldSources(summaryField);
}
/**
 * Derives the summary fields of one concrete field and distributes them to their
 * destination document summaries.
 *
 * @param field    the concrete field to derive summary fields for
 * @param schema   the schema owning the field
 * @param validate whether inconsistencies should cause processing exceptions
 */
private void collectSummaries(SDField field , Schema schema, boolean validate) {
SummaryField addedSummaryField = null;
String fieldName = field.getName();
SummaryField fieldSummaryField = field.getSummaryField(fieldName);
// Add an implicit summary field (to "default") when the field does summarying
// but declares none with its own name.
if (fieldSummaryField == null && field.doesSummarying()) {
fieldSummaryField = new SummaryField(fieldName, field.getDataType());
fieldSummaryField.setImplicit(true);
addSummaryFieldSources(fieldSummaryField, field);
fieldSummaryField.addDestination("default");
field.addSummaryField(fieldSummaryField);
addedSummaryField = fieldSummaryField;
}
// Register the summary field in each destination summary that already exists
if (fieldSummaryField != null) {
for (String dest : fieldSummaryField.getDestinations()) {
DocumentSummary summary = schema.getSummariesInThis().get(dest);
if (summary != null) {
summary.add(fieldSummaryField);
}
}
}
// An attribute with the field's name makes the implicit summary field attribute-backed;
// prefetched attributes are also registered in the "attributeprefetch" summary
for (Attribute attribute : field.getAttributes().values()) {
if (attribute.getName().equals(fieldName)) {
if (addedSummaryField != null) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (attribute.isPrefetch()) {
addPrefetchAttribute(attribute, field, schema);
}
}
}
// Complex fields with only struct field attributes use the attribute combiner transform
if (addedSummaryField != null && isComplexFieldWithOnlyStructFieldAttributes(field)) {
addedSummaryField.setTransform(SummaryTransform.ATTRIBUTECOMBINER);
}
// Position attributes contribute derived distance/position summary fields
if (field.doesSummarying()) {
for (Attribute attribute : field.getAttributes().values()) {
if ( ! attribute.isPosition()) continue;
DocumentSummary attributePrefetchSummary = getOrCreateAttributePrefetchSummary(schema);
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getDistanceSummaryFieldName(fieldName)));
attributePrefetchSummary.add(field.getSummaryField(PositionDataType.getPositionSummaryFieldName(fieldName)));
}
}
// Validate each summary field of this field and add it to its destinations
for (SummaryField summaryField : field.getSummaryFields().values()) {
Attribute attribute = field.getAttributes().get(fieldName);
if (attribute != null && summaryField.getTransform() == SummaryTransform.NONE) {
summaryField.setTransform(SummaryTransform.ATTRIBUTE);
}
if (isValid(summaryField, schema, validate)) {
addToDestinations(summaryField, schema);
}
}
}
/** Returns the "attributeprefetch" document summary, creating and registering it first if missing. */
private DocumentSummary getOrCreateAttributePrefetchSummary(Schema schema) {
DocumentSummary summary = schema.getSummariesInThis().get("attributeprefetch");
if (summary == null) {
summary = new DocumentSummary("attributeprefetch", schema);
schema.addSummary(summary);
}
return summary;
}
/**
 * Adds a summary field for the given prefetch attribute to the "attributeprefetch"
 * summary. When the attribute has no prefetch value, it is skipped if a summary field
 * with the attribute's name (on the field, or explicit in the schema) uses a dynamic transform.
 */
private void addPrefetchAttribute(Attribute attribute, SDField field, Schema schema) {
    String attributeName = attribute.getName();
    if (attribute.getPrefetchValue() == null) {
        SummaryField onField = field.getSummaryField(attributeName);
        if (onField != null && onField.getTransform().isDynamic()) return;
        SummaryField explicit = schema.getExplicitSummaryField(attributeName);
        if (explicit != null && explicit.getTransform().isDynamic()) return;
    }
    SummaryField prefetchField = new SummaryField(attributeName, attribute.getDataType());
    prefetchField.addSource(attributeName);
    prefetchField.addDestination("attributeprefetch");
    prefetchField.setTransform(SummaryTransform.ATTRIBUTE);
    getOrCreateAttributePrefetchSummary(schema).add(prefetchField);
}
/**
 * Returns whether a summary field is valid and should be added to its destination summaries.
 *
 * @param summaryField the summary field to check
 * @param schema       the schema owning the field
 * @param validate     whether inconsistencies should throw rather than be skipped
 * @return true if the field should be added, false if it should be ignored
 * @throws RuntimeException (via newProcessException) on inconsistencies when validate is true
 */
private boolean isValid(SummaryField summaryField, Schema schema, boolean validate) {
    // DISTANCE/POSITIONS summary fields are derived from exactly one attribute source
    if (summaryField.getTransform() == SummaryTransform.DISTANCE ||
        summaryField.getTransform() == SummaryTransform.POSITIONS) {
        int sourceCount = summaryField.getSourceCount();
        if (validate && sourceCount != 1) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Expected 1 source field, got " + sourceCount + ".");
        }
        String sourceName = summaryField.getSingleSource();
        if (validate && schema.getAttribute(sourceName) == null) {
            throw newProcessException(schema.getName(), summaryField.getName(),
                                      "Summary source attribute '" + sourceName + "' not found.");
        }
        return true;
    }
    String fieldName = summaryField.getSourceField();
    SDField sourceField = schema.getConcreteField(fieldName);
    // Fix: guard unconditionally — previously, when validate was false and the source
    // field was missing, the dereference below threw a NullPointerException.
    if (sourceField == null) {
        if (validate)
            throw newProcessException(schema, summaryField, "Source field '" + fieldName + "' does not exist.");
        return false;
    }
    if ( ! sourceField.doesSummarying() &&
         summaryField.getTransform() != SummaryTransform.ATTRIBUTE &&
         summaryField.getTransform() != SummaryTransform.GEOPOS) {
        deployLogger.logApplicationPackage(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
                                           " is not creating a summary value in its indexing statement");
        return false;
    }
    // Warn when a dynamic summary (snippet/bolding) forces fetching the disk value
    // of an attribute-backed field.
    if (summaryField.getTransform().isDynamic()
        && summaryField.getName().equals(sourceField.getName())
        && sourceField.doesAttributing()) {
        Attribute attribute = sourceField.getAttributes().get(sourceField.getName());
        if (attribute != null) {
            String destinations = "document summary 'default'";
            if (summaryField.getDestinations().size() > 0) {
                destinations = "document summaries " + summaryField.getDestinations();
            }
            deployLogger.logApplicationPackage(Level.WARNING,
                "Will fetch the disk summary value of " + sourceField + " in " + destinations +
                " since this summary field uses a dynamic summary value (snippet/bolding): Dynamic summaries and bolding " +
                "is not supported with summary values fetched from in-memory attributes yet. If you want to see partial updates " +
                "to this attribute, remove any bolding and dynamic snippeting from this field");
        }
    }
    return true;
}
/** Adds this summary field to each of its destination summaries, or to "default" if it has none. */
private void addToDestinations(SummaryField summaryField, Schema schema) {
    if (summaryField.getDestinations().isEmpty()) {
        addToDestination("default", summaryField, schema);
        return;
    }
    for (String destinationName : summaryField.getDestinations()) {
        addToDestination(destinationName, summaryField, schema);
    }
}
/**
 * Adds the summary field to the named document summary, creating and registering
 * the summary if it does not exist, and merging with any existing summary field
 * of the same name when it does.
 */
private void addToDestination(String destinationName, SummaryField summaryField, Schema schema) {
DocumentSummary destination = schema.getSummariesInThis().get(destinationName);
if (destination == null) {
destination = new DocumentSummary(destinationName, schema);
schema.addSummary(destination);
destination.add(summaryField);
}
else {
SummaryField existingField= destination.getSummaryField(summaryField.getName());
SummaryField merged = summaryField.mergeWith(existingField);
destination.add(merged);
}
}
} |
This will also include application-level endpoints now (if declared), but we haven't modeled that on the config server side yet. | private void createEndpointList(DeployState deployState) {
if( deployState.getProperties().applicationId().instance().isTester()) return;
List<ApplicationClusterEndpoint> endpoints = new ArrayList<>();
List<String> hosts = getContainers().stream()
.map(AbstractService::getHostName)
.collect(Collectors.toList());
for(String suffix : deployState.getProperties().zoneDnsSuffixes()) {
ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom(
ClusterSpec.Id.from(getName()),
deployState.getProperties().applicationId(),
suffix);
endpoints.add(ApplicationClusterEndpoint.builder()
.zoneScope()
.sharedL4Routing()
.dnsName(l4Name)
.hosts(hosts)
.build());
ApplicationClusterEndpoint.DnsName l7Name = ApplicationClusterEndpoint.DnsName.sharedNameFrom(
ClusterSpec.Id.from(getName()),
deployState.getProperties().applicationId(),
suffix);
endpoints.add(ApplicationClusterEndpoint.builder()
.zoneScope()
.sharedRouting()
.dnsName(l7Name)
.hosts(hosts)
.build());
}
Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints();
endpointsFromController.stream()
.filter(ce -> ce.clusterId().equals(getName()))
.forEach(ce -> ce.names().forEach(
name -> endpoints.add(ApplicationClusterEndpoint.builder()
.globalScope()
.sharedL4Routing()
.dnsName(ApplicationClusterEndpoint.DnsName.from(name))
.hosts(hosts)
.build())
));
endpointList = List.copyOf(endpoints);
} | Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints(); | private void createEndpointList(DeployState deployState) {
if(!deployState.isHosted()) return;
if(deployState.getProperties().applicationId().instance().isTester()) return;
List<ApplicationClusterEndpoint> endpoints = new ArrayList<>();
List<String> hosts = getContainers().stream()
.map(AbstractService::getHostName)
.collect(Collectors.toList());
for(String suffix : deployState.getProperties().zoneDnsSuffixes()) {
ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom(
ClusterSpec.Id.from(getName()),
deployState.getProperties().applicationId(),
suffix);
endpoints.add(ApplicationClusterEndpoint.builder()
.zoneScope()
.sharedL4Routing()
.dnsName(l4Name)
.hosts(hosts)
.build());
ApplicationClusterEndpoint.DnsName l7Name = ApplicationClusterEndpoint.DnsName.sharedNameFrom(
ClusterSpec.Id.from(getName()),
deployState.getProperties().applicationId(),
suffix);
endpoints.add(ApplicationClusterEndpoint.builder()
.zoneScope()
.sharedRouting()
.dnsName(l7Name)
.hosts(hosts)
.build());
}
Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints();
endpointsFromController.stream()
.filter(ce -> ce.clusterId().equals(getName()))
.forEach(ce -> ce.names().forEach(
name -> endpoints.add(ApplicationClusterEndpoint.builder()
.scope(ce.scope())
.sharedL4Routing()
.dnsName(ApplicationClusterEndpoint.DnsName.from(name))
.hosts(hosts)
.build())
));
endpointList = List.copyOf(endpoints);
} | class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements
ApplicationBundlesConfig.Producer,
QrStartConfig.Producer,
RankProfilesConfig.Producer,
RankingConstantsConfig.Producer,
OnnxModelsConfig.Producer,
RankingExpressionsConfig.Producer,
ServletPathsConfig.Producer,
ContainerMbusConfig.Producer,
MetricsProxyApiConfig.Producer,
ZookeeperServerConfig.Producer,
ApplicationClusterInfo {
// Handler class and bindings for the metrics/v2 API
public static final String METRICS_V2_HANDLER_CLASS = MetricsV2Handler.class.getName();
public static final BindingPattern METRICS_V2_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH);
public static final BindingPattern METRICS_V2_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH + "/*");
// Handler class and bindings for the prometheus/v1 API
public static final String PROMETHEUS_V1_HANDLER_CLASS = PrometheusV1Handler.class.getName();
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH);
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH + "/*");
// Heap size as a percentage of total node memory (per constant name, the lower value
// applies when the container is combined with a content cluster)
public static final int heapSizePercentageOfTotalNodeMemory = 70;
public static final int heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster = 18;
// File references to the application's component bundles (insertion ordered)
private final Set<FileReference> applicationBundles = new LinkedHashSet<>();
// The servlets owned by this cluster
private final ConfigProducerGroup<Servlet> servletGroup;
// Hostnames allocated by the previous model, if any
private final Set<String> previousHosts;
private ContainerModelEvaluation modelEvaluation;
private final Optional<String> tlsClientAuthority;
private MbusParams mbusParams;
private boolean messageBusEnabled = true;
// Heap percentage explicitly specified by the application, or null for the default
private Integer memoryPercentage = null;
private List<ApplicationClusterEndpoint> endpointList = List.of();
/**
 * Creates an application container cluster and registers its default components and handlers.
 *
 * @param parent      the config producer this cluster belongs to
 * @param configSubId the config id component of this cluster
 * @param clusterId   the name of this cluster
 * @param deployState the deploy state this model is built from
 */
public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState) {
super(parent, configSubId, clusterId, deployState, true);
this.tlsClientAuthority = deployState.tlsClientAuthority();
servletGroup = new ConfigProducerGroup<>(this, "servlet");
// Collect the hostnames allocated by the previous model, if any
previousHosts = deployState.getPreviousModel().stream()
.map(Model::allocatedHosts)
.map(AllocatedHosts::getHosts)
.flatMap(Collection::stream)
.map(HostSpec::hostname)
.collect(Collectors.toUnmodifiableSet());
// Default components available to all application container clusters
addSimpleComponent("com.yahoo.language.provider.DefaultLinguisticsProvider");
addSimpleComponent("com.yahoo.language.provider.DefaultEmbedderProvider");
addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.DeprecatedSecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.AthenzIdentityProviderProvider");
addSimpleComponent(com.yahoo.container.core.documentapi.DocumentAccessProvider.class.getName());
addMetricsHandlers();
addTestrunnerComponentsIfTester(deployState);
}
@Override
protected void doPrepare(DeployState deployState) {
// Distribute bundles and user-configured files, then compute this cluster's endpoints
addAndSendApplicationBundles(deployState);
sendUserConfiguredFiles(deployState);
createEndpointList(deployState);
}
/** Registers each application component bundle in the file registry and records its reference. */
private void addAndSendApplicationBundles(DeployState deployState) {
    for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) {
        applicationBundles.add(deployState.getFileRegistry().addFile(component.getPathRelativeToAppDir()));
    }
}
/** Sends the user-configured files of every component in this cluster via a FileSender. */
private void sendUserConfiguredFiles(DeployState deployState) {
    FileSender sender = new FileSender(containers, deployState.getFileRegistry(), deployState.getDeployLogger());
    getAllComponents().forEach(sender::sendUserConfiguredFiles);
}
/** Adds the metrics/v2 and prometheus/v1 handlers with their server bindings. */
private void addMetricsHandlers() {
addMetricsHandler(METRICS_V2_HANDLER_CLASS, METRICS_V2_HANDLER_BINDING_1, METRICS_V2_HANDLER_BINDING_2);
addMetricsHandler(PROMETHEUS_V1_HANDLER_CLASS, PROMETHEUS_V1_HANDLER_BINDING_1, PROMETHEUS_V1_HANDLER_BINDING_2);
}
/** Adds a handler component of the given class, bound to the given root and wildcard paths. */
private void addMetricsHandler(String handlerClass, BindingPattern rootBinding, BindingPattern innerBinding) {
    ComponentModel model = new ComponentModel(handlerClass, null, null, null);
    Handler<AbstractConfigProducer<?>> metricsHandler = new Handler<>(model);
    metricsHandler.addServerBindings(rootBinding, innerBinding);
    addComponent(metricsHandler);
}
private void addTestrunnerComponentsIfTester(DeployState deployState) {
if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester()) {
addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-testrunner-components"));
addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-osgi-testrunner"));
addPlatformBundle(PlatformBundles.absoluteBundlePath("tenant-cd-api"));
if(deployState.zone().system().isPublic()) {
addPlatformBundle(PlatformBundles.absoluteBundlePath("cloud-tenant-cd"));
}
}
}
public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) {
this.modelEvaluation = modelEvaluation;
}
public Map<ComponentId, Servlet> getServletMap() {
return servletGroup.getComponentMap();
}
public void addServlet(Servlet servlet) {
servletGroup.addComponent(servlet.getGlobalComponentId(), servlet);
}
public Collection<Servlet> getAllServlets() {
return allServlets().collect(Collectors.toCollection(ArrayList::new));
}
private Stream<Servlet> allServlets() {
return servletGroup.getComponents().stream();
}
public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage;
}
/**
* Returns the percentage of host physical memory this application has specified for nodes in this cluster,
* or empty if this is not specified by the application.
*/
public Optional<Integer> getMemoryPercentage() { return Optional.ofNullable(memoryPercentage); }
/*
Create list of endpoints, these will be consumed later by the LBservicesProducer
*/
@Override
public void getConfig(ApplicationBundlesConfig.Builder builder) {
applicationBundles.stream().map(FileReference::value)
.forEach(builder::bundles);
}
@Override
public void getConfig(ServletPathsConfig.Builder builder) {
allServlets().forEach(servlet ->
builder.servlets(servlet.getComponentId().stringValue(),
servlet.toConfigBuilder())
);
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(RankingConstantsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(OnnxModelsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
public void getConfig(RankingExpressionsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
if (mbusParams != null) {
if (mbusParams.maxConcurrentFactor != null)
builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor);
if (mbusParams.documentExpansionFactor != null)
builder.documentExpansionFactor(mbusParams.documentExpansionFactor);
if (mbusParams.containerCoreMemory != null)
builder.containerCoreMemory(mbusParams.containerCoreMemory);
}
if (getDocproc() != null)
getDocproc().getConfig(builder);
}
@Override
public void getConfig(MetricsProxyApiConfig.Builder builder) {
builder.metricsPort(MetricsProxyContainer.BASEPORT)
.metricsApiPath(ApplicationMetricsHandler.METRICS_VALUES_PATH)
.prometheusApiPath(ApplicationMetricsHandler.PROMETHEUS_VALUES_PATH);
}
@Override
public void getConfig(QrStartConfig.Builder builder) {
super.getConfig(builder);
builder.jvm.verbosegc(true)
.availableProcessors(0)
.compressedClassSpaceSize(0)
.minHeapsize(1536)
.heapsize(1536);
if (getMemoryPercentage().isPresent()) {
builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get());
} else if (isHostedVespa()) {
builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getHostClusterId().isPresent() ?
heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster :
heapSizePercentageOfTotalNodeMemory);
}
}
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
if (getParent() instanceof ConfigserverCluster) return;
for (Container container : getContainers()) {
ZookeeperServerConfig.Server.Builder serverBuilder = new ZookeeperServerConfig.Server.Builder();
serverBuilder.hostname(container.getHostName())
.id(container.index())
.joining(!previousHosts.isEmpty() &&
!previousHosts.contains(container.getHostName()));
builder.server(serverBuilder);
builder.dynamicReconfiguration(true);
}
}
public Optional<String> getTlsClientAuthority() {
return tlsClientAuthority;
}
public void setMbusParams(MbusParams mbusParams) {
this.mbusParams = mbusParams;
}
public void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; }
protected boolean messageBusEnabled() { return messageBusEnabled; }
public void addMbusServer(ComponentId chainId) {
ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer"));
addComponent(
new Component<>(new ComponentModel(new BundleInstantiationSpecification(
serviceId,
ComponentSpecification.fromString(MbusServerProvider.class.getName()),
null))));
}
@Override
public List<ApplicationClusterEndpoint> endpoints() {
return endpointList;
}
public static class MbusParams {
final Double maxConcurrentFactor;
final Double documentExpansionFactor;
final Integer containerCoreMemory;
public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) {
this.maxConcurrentFactor = maxConcurrentFactor;
this.documentExpansionFactor = documentExpansionFactor;
this.containerCoreMemory = containerCoreMemory;
}
}
} | class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements
ApplicationBundlesConfig.Producer,
QrStartConfig.Producer,
RankProfilesConfig.Producer,
RankingConstantsConfig.Producer,
OnnxModelsConfig.Producer,
RankingExpressionsConfig.Producer,
ServletPathsConfig.Producer,
ContainerMbusConfig.Producer,
MetricsProxyApiConfig.Producer,
ZookeeperServerConfig.Producer,
ApplicationClusterInfo {
public static final String METRICS_V2_HANDLER_CLASS = MetricsV2Handler.class.getName();
public static final BindingPattern METRICS_V2_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH);
public static final BindingPattern METRICS_V2_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH + "/*");
public static final String PROMETHEUS_V1_HANDLER_CLASS = PrometheusV1Handler.class.getName();
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH);
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH + "/*");
public static final int heapSizePercentageOfTotalNodeMemory = 70;
public static final int heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster = 18;
private final Set<FileReference> applicationBundles = new LinkedHashSet<>();
private final ConfigProducerGroup<Servlet> servletGroup;
private final Set<String> previousHosts;
private ContainerModelEvaluation modelEvaluation;
private final Optional<String> tlsClientAuthority;
private MbusParams mbusParams;
private boolean messageBusEnabled = true;
private Integer memoryPercentage = null;
private List<ApplicationClusterEndpoint> endpointList = List.of();
public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState) {
super(parent, configSubId, clusterId, deployState, true);
this.tlsClientAuthority = deployState.tlsClientAuthority();
servletGroup = new ConfigProducerGroup<>(this, "servlet");
previousHosts = deployState.getPreviousModel().stream()
.map(Model::allocatedHosts)
.map(AllocatedHosts::getHosts)
.flatMap(Collection::stream)
.map(HostSpec::hostname)
.collect(Collectors.toUnmodifiableSet());
addSimpleComponent("com.yahoo.language.provider.DefaultLinguisticsProvider");
addSimpleComponent("com.yahoo.language.provider.DefaultEmbedderProvider");
addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.DeprecatedSecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.AthenzIdentityProviderProvider");
addSimpleComponent(com.yahoo.container.core.documentapi.DocumentAccessProvider.class.getName());
addMetricsHandlers();
addTestrunnerComponentsIfTester(deployState);
}
@Override
protected void doPrepare(DeployState deployState) {
addAndSendApplicationBundles(deployState);
sendUserConfiguredFiles(deployState);
createEndpointList(deployState);
}
private void addAndSendApplicationBundles(DeployState deployState) {
for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) {
FileReference reference = deployState.getFileRegistry().addFile(component.getPathRelativeToAppDir());
applicationBundles.add(reference);
}
}
private void sendUserConfiguredFiles(DeployState deployState) {
FileSender fileSender = new FileSender(containers, deployState.getFileRegistry(), deployState.getDeployLogger());
for (Component<?, ?> component : getAllComponents()) {
fileSender.sendUserConfiguredFiles(component);
}
}
private void addMetricsHandlers() {
addMetricsHandler(METRICS_V2_HANDLER_CLASS, METRICS_V2_HANDLER_BINDING_1, METRICS_V2_HANDLER_BINDING_2);
addMetricsHandler(PROMETHEUS_V1_HANDLER_CLASS, PROMETHEUS_V1_HANDLER_BINDING_1, PROMETHEUS_V1_HANDLER_BINDING_2);
}
private void addMetricsHandler(String handlerClass, BindingPattern rootBinding, BindingPattern innerBinding) {
Handler<AbstractConfigProducer<?>> handler = new Handler<>(
new ComponentModel(handlerClass, null, null, null));
handler.addServerBindings(rootBinding, innerBinding);
addComponent(handler);
}
private void addTestrunnerComponentsIfTester(DeployState deployState) {
if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester()) {
addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-testrunner-components"));
addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-osgi-testrunner"));
addPlatformBundle(PlatformBundles.absoluteBundlePath("tenant-cd-api"));
if(deployState.zone().system().isPublic()) {
addPlatformBundle(PlatformBundles.absoluteBundlePath("cloud-tenant-cd"));
}
}
}
public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) {
this.modelEvaluation = modelEvaluation;
}
public Map<ComponentId, Servlet> getServletMap() {
return servletGroup.getComponentMap();
}
public void addServlet(Servlet servlet) {
servletGroup.addComponent(servlet.getGlobalComponentId(), servlet);
}
public Collection<Servlet> getAllServlets() {
return allServlets().collect(Collectors.toCollection(ArrayList::new));
}
private Stream<Servlet> allServlets() {
return servletGroup.getComponents().stream();
}
public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage;
}
/**
* Returns the percentage of host physical memory this application has specified for nodes in this cluster,
* or empty if this is not specified by the application.
*/
public Optional<Integer> getMemoryPercentage() { return Optional.ofNullable(memoryPercentage); }
/*
Create list of endpoints, these will be consumed later by the LBservicesProducer
*/
@Override
public void getConfig(ApplicationBundlesConfig.Builder builder) {
applicationBundles.stream().map(FileReference::value)
.forEach(builder::bundles);
}
@Override
public void getConfig(ServletPathsConfig.Builder builder) {
allServlets().forEach(servlet ->
builder.servlets(servlet.getComponentId().stringValue(),
servlet.toConfigBuilder())
);
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(RankingConstantsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(OnnxModelsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
public void getConfig(RankingExpressionsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
if (mbusParams != null) {
if (mbusParams.maxConcurrentFactor != null)
builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor);
if (mbusParams.documentExpansionFactor != null)
builder.documentExpansionFactor(mbusParams.documentExpansionFactor);
if (mbusParams.containerCoreMemory != null)
builder.containerCoreMemory(mbusParams.containerCoreMemory);
}
if (getDocproc() != null)
getDocproc().getConfig(builder);
}
@Override
public void getConfig(MetricsProxyApiConfig.Builder builder) {
builder.metricsPort(MetricsProxyContainer.BASEPORT)
.metricsApiPath(ApplicationMetricsHandler.METRICS_VALUES_PATH)
.prometheusApiPath(ApplicationMetricsHandler.PROMETHEUS_VALUES_PATH);
}
@Override
public void getConfig(QrStartConfig.Builder builder) {
super.getConfig(builder);
builder.jvm.verbosegc(true)
.availableProcessors(0)
.compressedClassSpaceSize(0)
.minHeapsize(1536)
.heapsize(1536);
if (getMemoryPercentage().isPresent()) {
builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get());
} else if (isHostedVespa()) {
builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getHostClusterId().isPresent() ?
heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster :
heapSizePercentageOfTotalNodeMemory);
}
}
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
if (getParent() instanceof ConfigserverCluster) return;
for (Container container : getContainers()) {
ZookeeperServerConfig.Server.Builder serverBuilder = new ZookeeperServerConfig.Server.Builder();
serverBuilder.hostname(container.getHostName())
.id(container.index())
.joining(!previousHosts.isEmpty() &&
!previousHosts.contains(container.getHostName()));
builder.server(serverBuilder);
builder.dynamicReconfiguration(true);
}
}
public Optional<String> getTlsClientAuthority() {
return tlsClientAuthority;
}
public void setMbusParams(MbusParams mbusParams) {
this.mbusParams = mbusParams;
}
public void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; }
protected boolean messageBusEnabled() { return messageBusEnabled; }
public void addMbusServer(ComponentId chainId) {
ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer"));
addComponent(
new Component<>(new ComponentModel(new BundleInstantiationSpecification(
serviceId,
ComponentSpecification.fromString(MbusServerProvider.class.getName()),
null))));
}
@Override
public List<ApplicationClusterEndpoint> endpoints() {
return endpointList;
}
public static class MbusParams {
final Double maxConcurrentFactor;
final Double documentExpansionFactor;
final Integer containerCoreMemory;
public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) {
this.maxConcurrentFactor = maxConcurrentFactor;
this.documentExpansionFactor = documentExpansionFactor;
this.containerCoreMemory = containerCoreMemory;
}
}
} |
It is strange that this does not include san dns name for zone aws-us-west-2a ... But this is not really what you test here ... | public void includes_application_endpoint_when_declared() {
Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"));
ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
.region(zone2.region())
.applicationEndpoint("a", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 2,
InstanceName.from("main"), 8))
.applicationEndpoint("b", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 1,
InstanceName.from("main"), 1))
.applicationEndpoint("c", "qrs", zone1.region().value(),
Map.of(InstanceName.from("beta"), 4,
InstanceName.from("main"), 6))
.build();
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
"a1.t1.g.vespa-app.cloud",
"*.a1.t1.g.vespa-app.cloud",
"a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"a1.t1.aws-us-east-1c.test.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.test.z.vespa-app.cloud",
"a1.t1.aws-us-east-1c.staging.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
} | List<String> expectedSans = List.of( | public void includes_application_endpoint_when_declared() {
Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"));
ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
.region(zone2.region())
.applicationEndpoint("a", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 2,
InstanceName.from("main"), 8))
.applicationEndpoint("b", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 1,
InstanceName.from("main"), 1))
.applicationEndpoint("c", "qrs", zone1.region().value(),
Map.of(InstanceName.from("beta"), 4,
InstanceName.from("main"), 6))
.build();
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
"a1.t1.g.vespa-app.cloud",
"*.a1.t1.g.vespa-app.cloud",
"a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"a1.t1.aws-us-east-1c.test.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.test.z.vespa-app.cloud",
"a1.t1.aws-us-east-1c.staging.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
} | class EndpointCertificatesTest {
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock();
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
private static final List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud",
"default.default.us-east-1.test.vespa.oath.cloud",
"*.default.default.us-east-1.test.vespa.oath.cloud",
"default.default.us-east-3.staging.vespa.oath.cloud",
"*.default.default.us-east-3.staging.vespa.oath.cloud"
);
private static final List<String> expectedAdditionalSans = List.of(
"default.default.ap-northeast-1.vespa.oath.cloud",
"*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
private static final List<String> expectedDevSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.us-east-1.dev.vespa.oath.cloud",
"*.default.default.us-east-1.dev.vespa.oath.cloud"
);
private X509Certificate makeTestCert(List<String> sans) {
X509CertificateBuilder x509CertificateBuilder = X509CertificateBuilder
.fromKeypair(
testKeyPair,
new X500Principal("CN=test"),
clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
SignatureAlgorithm.SHA256_WITH_ECDSA,
X509CertificateBuilder.generateRandomSerialNumber());
for (String san : sans) x509CertificateBuilder = x509CertificateBuilder.addSubjectAlternativeName(san);
return x509CertificateBuilder.build();
}
private final Instance testInstance = new Instance(ApplicationId.defaultId());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
private ZoneId testZone;
@Before
public void setUp() {
tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
clock.setInstant(Instant.EPOCH);
testCertificate = makeTestCert(expectedSans);
testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
public void provisions_new_certificate_in_dev() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedDevSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_prod() {
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_public_prod() {
ControllerTester tester = new ControllerTester(SystemName.Public);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
"default.default.g.vespa-app.cloud",
"*.default.default.g.vespa-app.cloud",
"default.default.aws-us-east-1a.z.vespa-app.cloud",
"*.default.default.aws-us-east-1a.z.vespa-app.cloud",
"default.default.aws-us-east-1c.test.z.vespa-app.cloud",
"*.default.default.aws-us-east-1c.test.z.vespa-app.cloud",
"default.default.aws-us-east-1c.staging.z.vespa-app.cloud",
"*.default.default.aws-us-east-1c.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
public void reuses_stored_certificate_metadata() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id",
List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud"),
"", Optional.empty(), Optional.empty()));
secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(testKeyName, endpointCertificateMetadata.get().keyName());
assertEquals(testCertName, endpointCertificateMetadata.get().certName());
assertEquals(7, endpointCertificateMetadata.get().version());
}
@Test
public void reprovisions_certificate_when_necessary() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "uuid", List.of(), "issuer", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
@Test
public void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
ZoneId testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().skip(1).findFirst().orElseThrow().getId();
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", expectedSans, "mockCa", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
assertEquals("original-request-uuid", endpointCertificateMetadata.get().requestId());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
public void includes_zones_in_deployment_spec_when_deploying_to_staging() {
DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
"<deployment version=\"1.0\">\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">aws-us-east-1a</region>\n" +
" <region active=\"true\">ap-northeast-1</region>\n" +
" </prod>\n" +
" </instance>\n" +
"</deployment>\n");
ZoneId testZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, deploymentSpec);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
} | class EndpointCertificatesTest {
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock();
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
private static final List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud",
"default.default.us-east-1.test.vespa.oath.cloud",
"*.default.default.us-east-1.test.vespa.oath.cloud",
"default.default.us-east-3.staging.vespa.oath.cloud",
"*.default.default.us-east-3.staging.vespa.oath.cloud"
);
private static final List<String> expectedAdditionalSans = List.of(
"default.default.ap-northeast-1.vespa.oath.cloud",
"*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
// SANs expected for a dev deployment: only the instance endpoint and the single
// dev zone (no global, test or staging names).
private static final List<String> expectedDevSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.us-east-1.dev.vespa.oath.cloud",
"*.default.default.us-east-1.dev.vespa.oath.cloud"
);
/**
 * Builds a short-lived (5 minute) self-signed ECDSA test certificate whose
 * subject alternative names are exactly {@code sans}, signed with the shared
 * test key pair and valid from the manual clock's current instant.
 */
private X509Certificate makeTestCert(List<String> sans) {
    X509CertificateBuilder builder = X509CertificateBuilder
            .fromKeypair(
                    testKeyPair,
                    new X500Principal("CN=test"),
                    clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
                    SignatureAlgorithm.SHA256_WITH_ECDSA,
                    X509CertificateBuilder.generateRandomSerialNumber());
    for (String name : sans) {
        builder = builder.addSubjectAlternativeName(name);
    }
    return builder.build();
}
// Default application instance and secret-store names used across the tests.
private final Instance testInstance = new Instance(ApplicationId.defaultId());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
// First exclusively-routed prod zone; assigned in setUp().
private ZoneId testZone;
@Before
public void setUp() {
// Make every zone exclusively routed BEFORE querying for one below — the
// routingMethod(...) filter depends on this registry state.
tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
testZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
// Pin the clock so certificate validity windows are deterministic.
clock.setInstant(Instant.EPOCH);
// Pre-built certificates matching the prod and combined SAN sets, for tests
// that seed the mock secret store directly.
testCertificate = makeTestCert(expectedSans);
testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
public void provisions_new_certificate_in_dev() {
    // A dev deployment with no stored metadata should trigger provisioning of a
    // fresh certificate covering only the dev SANs.
    ZoneId devZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
    Optional<EndpointCertificateMetadata> maybeMetadata = endpointCertificates.getMetadata(testInstance, devZone, DeploymentSpec.empty);
    assertTrue(maybeMetadata.isPresent());
    EndpointCertificateMetadata metadata = maybeMetadata.get();
    assertTrue(metadata.keyName().matches("vespa.tls.default.default.*-key"));
    assertTrue(metadata.certName().matches("vespa.tls.default.default.*-cert"));
    assertEquals(0, metadata.version());
    assertEquals(expectedDevSans, metadata.requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_prod() {
    // No stored metadata for the instance: a prod deployment should provision a
    // new certificate at version 0 with the full prod SAN set.
    Optional<EndpointCertificateMetadata> maybeMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(maybeMetadata.isPresent());
    EndpointCertificateMetadata metadata = maybeMetadata.get();
    assertTrue(metadata.keyName().matches("vespa.tls.default.default.*-key"));
    assertTrue(metadata.certName().matches("vespa.tls.default.default.*-cert"));
    assertEquals(0, metadata.version());
    assertEquals(expectedSans, metadata.requestedDnsSans());
}
@Test
public void provisions_new_certificate_in_public_prod() {
    // Separate controller in the Public system: endpoint names use the
    // vespa-app.cloud domain instead of vespa.oath.cloud.
    ControllerTester publicTester = new ControllerTester(SystemName.Public);
    EndpointCertificateValidatorImpl publicValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
    EndpointCertificates publicEndpointCertificates = new EndpointCertificates(publicTester.controller(), endpointCertificateMock, publicValidator);
    List<String> expectedPublicSans = List.of(
            "vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
            "default.default.g.vespa-app.cloud",
            "*.default.default.g.vespa-app.cloud",
            "default.default.aws-us-east-1a.z.vespa-app.cloud",
            "*.default.default.aws-us-east-1a.z.vespa-app.cloud",
            "default.default.aws-us-east-1c.test.z.vespa-app.cloud",
            "*.default.default.aws-us-east-1c.test.z.vespa-app.cloud",
            "default.default.aws-us-east-1c.staging.z.vespa-app.cloud",
            "*.default.default.aws-us-east-1c.staging.z.vespa-app.cloud"
    );
    // NOTE(review): this uses the field testZone taken from the non-Public tester
    // in setUp() — looks intentional (only the system matters here), but confirm.
    Optional<EndpointCertificateMetadata> maybeMetadata = publicEndpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(maybeMetadata.isPresent());
    EndpointCertificateMetadata metadata = maybeMetadata.get();
    assertTrue(metadata.keyName().matches("vespa.tls.default.default.*-key"));
    assertTrue(metadata.certName().matches("vespa.tls.default.default.*-cert"));
    assertEquals(0, metadata.version());
    assertEquals(expectedPublicSans, metadata.requestedDnsSans());
}
@Test
public void reuses_stored_certificate_metadata() {
    // Seed curator with existing metadata whose SANs cover this deployment, and
    // the secret store with a matching key/cert at the same version (7). The
    // stored metadata should then be returned unchanged instead of reprovisioning.
    List<String> storedSans = List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
            "default.default.global.vespa.oath.cloud",
            "*.default.default.global.vespa.oath.cloud",
            "default.default.aws-us-east-1a.vespa.oath.cloud",
            "*.default.default.aws-us-east-1a.vespa.oath.cloud");
    mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id",
            storedSans, "", Optional.empty(), Optional.empty()));
    secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
    secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
    Optional<EndpointCertificateMetadata> maybeMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(maybeMetadata.isPresent());
    EndpointCertificateMetadata metadata = maybeMetadata.get();
    assertEquals(testKeyName, metadata.keyName());
    assertEquals(testCertName, metadata.certName());
    assertEquals(7, metadata.version());
}
@Test
public void reprovisions_certificate_when_necessary() {
    // Stored metadata at sentinel version -1 (certificate not yet provisioned);
    // the secret store already holds version 0. getMetadata should bump the
    // metadata to version 0 and persist the result back to curator.
    mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "uuid", List.of(), "issuer", Optional.empty(), Optional.empty()));
    secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
    secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
    Optional<EndpointCertificateMetadata> maybeMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(maybeMetadata.isPresent());
    assertEquals(0, maybeMetadata.get().version());
    assertEquals(maybeMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
@Test
public void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
    // Deploy to a second prod zone not covered by the stored SANs: the
    // certificate should be re-requested (same request id) with the combined
    // SAN set, and the refreshed metadata written back to curator.
    ZoneId secondProdZone = tester.zoneRegistry().zones().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().skip(1).findFirst().orElseThrow().getId();
    mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", expectedSans, "mockCa", Optional.empty(), Optional.empty()));
    // Secret store holds the old cert at version -1 and the re-issued cert
    // (covering the combined SANs) at version 0.
    secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
    secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
    secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
    secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
    Optional<EndpointCertificateMetadata> maybeMetadata = endpointCertificates.getMetadata(testInstance, secondProdZone, DeploymentSpec.empty);
    assertTrue(maybeMetadata.isPresent());
    EndpointCertificateMetadata metadata = maybeMetadata.get();
    assertEquals(0, metadata.version());
    assertEquals(maybeMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
    assertEquals("original-request-uuid", metadata.requestId());
    assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(metadata.requestedDnsSans()));
}
@Test
public void includes_zones_in_deployment_spec_when_deploying_to_staging() {
    // A staging deployment should request SANs for every prod zone declared in
    // the deployment spec (here aws-us-east-1a and ap-northeast-1), not just
    // the staging zone itself.
    DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
            "<deployment version=\"1.0\">\n" +
            "  <instance id=\"default\">\n" +
            "    <prod>\n" +
            "      <region active=\"true\">aws-us-east-1a</region>\n" +
            "      <region active=\"true\">ap-northeast-1</region>\n" +
            "    </prod>\n" +
            "  </instance>\n" +
            "</deployment>\n");
    ZoneId stagingZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
    Optional<EndpointCertificateMetadata> maybeMetadata = endpointCertificates.getMetadata(testInstance, stagingZone, deploymentSpec);
    assertTrue(maybeMetadata.isPresent());
    EndpointCertificateMetadata metadata = maybeMetadata.get();
    assertTrue(metadata.keyName().matches("vespa.tls.default.default.*-key"));
    assertTrue(metadata.certName().matches("vespa.tls.default.default.*-cert"));
    assertEquals(0, metadata.version());
    assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(metadata.requestedDnsSans()));
}
@Test
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.